From 29bd6f3fc3bd78b439d61768885c9f3e7f31a540 Mon Sep 17 00:00:00 2001
From: Surendra Singh Lilhore
Date: Wed, 28 Aug 2019 23:58:23 +0530
Subject: [PATCH] HDFS-8631. WebHDFS : Support setQuota. Contributed by Chao Sun.

---
 .../java/org/apache/hadoop/fs/FileSystem.java | 43 +++++++++++++
 .../hadoop/fs/TestFilterFileSystem.java       |  2 +
 .../apache/hadoop/fs/TestHarFileSystem.java   |  2 +
 .../hadoop/hdfs/DistributedFileSystem.java    |  2 +
 .../hadoop/hdfs/web/WebHdfsFileSystem.java    | 43 +++++++++++++
 .../web/resources/NameSpaceQuotaParam.java    | 44 +++++++++++++
 .../hadoop/hdfs/web/resources/PutOpParam.java |  3 +
 .../web/resources/StorageSpaceQuotaParam.java | 45 +++++++++++++
 .../hdfs/web/resources/StorageTypeParam.java  | 37 +++++++++++
 .../router/RouterWebHdfsMethods.java          | 10 ++-
 .../web/resources/NamenodeWebHdfsMethods.java | 51 ++++++++++++---
 .../hadoop-hdfs/src/site/markdown/WebHDFS.md  | 64 +++++++++++++++++++
 .../apache/hadoop/hdfs/web/TestWebHDFS.java   | 60 ++++++++++++++++-
 .../hadoop/hdfs/web/resources/TestParam.java  | 29 +++++++++
 14 files changed, 424 insertions(+), 11 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java

diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 3cbab1afb1b..059a7d4d2e7 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1788,6 +1788,33 @@ public abstract class FileSystem extends Configured
     return getContentSummary(f);
   }
 
+  /**
+   * Set quota for the given {@link Path}.
+   *
+   * @param src the target path to set quota for
+   * @param namespaceQuota the namespace quota (i.e., # of files/directories)
+   *                       to set
+   * @param storagespaceQuota the storage space quota to set
+   * @throws IOException IO failure
+   */
+  public void setQuota(Path src, final long namespaceQuota,
+      final long storagespaceQuota) throws IOException {
+    methodNotSupported();
+  }
+
+  /**
+   * Set per storage type quota for the given {@link Path}.
+   *
+   * @param src the target path to set storage type quota for
+   * @param type the storage type to set
+   * @param quota the quota to set for the given storage type
+   * @throws IOException IO failure
+   */
+  public void setQuotaByStorageType(Path src, final StorageType type,
+      final long quota) throws IOException {
+    methodNotSupported();
+  }
+
   /**
    * The default filter accepts all paths.
    */
@@ -4455,6 +4482,22 @@ public abstract class FileSystem extends Configured
     return result;
   }
 
+  /**
+   * Helper method that throws an {@link UnsupportedOperationException} for the
+   * current {@link FileSystem} method being called.
+   */
+  private void methodNotSupported() {
+    // The order of the stacktrace elements is (from top to bottom):
+    // - java.lang.Thread.getStackTrace
+    // - org.apache.hadoop.fs.FileSystem.methodNotSupported
+    // - <the FileSystem method>
+    // therefore, to find out the current method name, we use the element at
+    // index 2.
+    String name = Thread.currentThread().getStackTrace()[2].getMethodName();
+    throw new UnsupportedOperationException(getClass().getCanonicalName() +
+        " does not support method " + name);
+  }
+
   /**
    * Create instance of the standard {@link FSDataInputStreamBuilder} for the
    * given filesystem and path.
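The two new methods above default to throwing UnsupportedOperationException via methodNotSupported(); only quota-aware file systems override them (DistributedFileSystem and WebHdfsFileSystem below). A minimal caller-side sketch, assuming an HDFS-backed FileSystem and a made-up path:

    FileSystem fs = FileSystem.get(conf);
    try {
      // Allow at most 1000 names and 10 GB of (replicated) storage under the dir.
      fs.setQuota(new Path("/projects/alpha"), 1000, 10L << 30);
    } catch (UnsupportedOperationException e) {
      // Thrown by file systems that do not override setQuota,
      // e.g. the local file system.
    }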
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index e7f42ff2b56..6de4f074105 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -135,6 +135,8 @@ public class TestFilterFileSystem {
     public Path fixRelativePart(Path p);
     public ContentSummary getContentSummary(Path f);
     public QuotaUsage getQuotaUsage(Path f);
+    void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
+    void setQuotaByStorageType(Path f, StorageType type, long quota);
     StorageStatistics getStorageStatistics();
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index 57798c2c8b9..b442553924f 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -118,6 +118,8 @@ public class TestHarFileSystem {
     public void processDeleteOnExit();
     public ContentSummary getContentSummary(Path f);
     public QuotaUsage getQuotaUsage(Path f);
+    void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
+    void setQuotaByStorageType(Path f, StorageType type, long quota);
     public FsStatus getStatus();
     public FileStatus[] listStatus(Path f, PathFilter filter);
     public FileStatus[] listStatusBatch(Path f, byte[] token);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4eb9b32fe93..73abb991f99 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -1000,6 +1000,7 @@ public class DistributedFileSystem extends FileSystem
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
    *     long, long, StorageType)
    */
+  @Override
   public void setQuota(Path src, final long namespaceQuota,
       final long storagespaceQuota) throws IOException {
     statistics.incrementWriteOps(1);
@@ -1029,6 +1030,7 @@ public class DistributedFileSystem extends FileSystem
    * @param quota value of the specific storage type quota to be modified.
    *     May be {@link HdfsConstants#QUOTA_RESET} to clear quota by storage type.
    */
+  @Override
   public void setQuotaByStorageType(Path src, final StorageType type,
       final long quota) throws IOException {
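Both methods funnel into the single ClientProtocol#setQuota(String, long, long, StorageType) RPC; their bodies fall outside the hunk context above. A rough sketch of the mapping, mirrored by the NamenodeWebHdfsMethods hunks later in this patch:

    // Sketch only; parameter names follow the patch.
    // setQuota(src, ns, ss)            -> clientProtocol.setQuota(path, ns, ss, null)
    // setQuotaByStorageType(src, t, q) -> clientProtocol.setQuota(path, QUOTA_DONT_SET, q, t)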
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 7d9e6d1f0d5..baebdc1654a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -75,6 +75,7 @@ import org.apache.hadoop.fs.GlobalStorageStatistics;
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
@@ -1873,6 +1874,48 @@ public class WebHdfsFileSystem extends FileSystem
     }.run();
   }
 
+  @Override
+  public void setQuota(Path p, final long namespaceQuota,
+      final long storagespaceQuota) throws IOException {
+    // sanity check
+    if ((namespaceQuota <= 0 &&
+        namespaceQuota != HdfsConstants.QUOTA_RESET) ||
+        (storagespaceQuota < 0 &&
+            storagespaceQuota != HdfsConstants.QUOTA_RESET)) {
+      throw new IllegalArgumentException("Invalid values for quota: " +
+          namespaceQuota + " and " + storagespaceQuota);
+    }
+
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_QUOTA_USAGE);
+
+    final HttpOpParam.Op op = PutOpParam.Op.SETQUOTA;
+    new FsPathRunner(op, p, new NameSpaceQuotaParam(namespaceQuota),
+        new StorageSpaceQuotaParam(storagespaceQuota)).run();
+  }
+
+  @Override
+  public void setQuotaByStorageType(Path path, StorageType type, long quota)
+      throws IOException {
+    if (quota <= 0 && quota != HdfsConstants.QUOTA_RESET) {
+      throw new IllegalArgumentException("Invalid value for quota: " + quota);
+    }
+    if (type == null) {
+      throw new IllegalArgumentException("Invalid storage type (null)");
+    }
+    if (!type.supportTypeQuota()) {
+      throw new IllegalArgumentException(
+          "Quota for storage type '" + type.toString() + "' is not supported");
+    }
+
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_QUOTA_BYTSTORAGEYPE);
+
+    final HttpOpParam.Op op = PutOpParam.Op.SETQUOTABYSTORAGETYPE;
+    new FsPathRunner(op, path, new StorageTypeParam(type.name()),
+        new StorageSpaceQuotaParam(quota)).run();
+  }
+
   @Override
   public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
   ) throws IOException {
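The sanity checks above admit two sentinel values from HdfsConstants: QUOTA_RESET (-1) clears an existing quota, while QUOTA_DONT_SET (Long.MAX_VALUE) is what the parameter classes below use as their default, meaning "leave this quota unchanged". A short usage sketch with made-up values:

    WebHdfsFileSystem webHdfs = ...;  // e.g. from FileSystem.get with a webhdfs:// URI
    Path dir = new Path("/projects/alpha");
    webHdfs.setQuota(dir, 100, 1024);              // 100 names, 1024 bytes of storage
    webHdfs.setQuota(dir, HdfsConstants.QUOTA_RESET,
        HdfsConstants.QUOTA_RESET);                // clear both quotas again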
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java
new file mode 100644
index 00000000000..d85022194b8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
+/** The name space quota parameter for directory. */
+public class NameSpaceQuotaParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "namespacequota";
+  /** Default parameter value ({@link Long#MAX_VALUE}). */
+  public static final String DEFAULT = "9223372036854775807";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  public NameSpaceQuotaParam(final Long value) {
+    super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
+        HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  public NameSpaceQuotaParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
index e3d9294156e..4fc00011ca1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
@@ -57,6 +57,9 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
     RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
     SETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
 
+    SETQUOTA(false, HttpURLConnection.HTTP_OK),
+    SETQUOTABYSTORAGETYPE(false, HttpURLConnection.HTTP_OK),
+
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
     final boolean doOutputAndRedirect;
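Each Param subclass contributes one key/value pair to the request query string, keyed by its NAME constant; the operation itself comes from PutOpParam. For SETQUOTA the FsPathRunner shown earlier therefore issues a PUT along these lines (path and values are illustrative):

    PUT /webhdfs/v1/projects/alpha?op=SETQUOTA&namespacequota=100&storagespacequota=1024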
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java
new file mode 100644
index 00000000000..65b18bdb90b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
+/** The storage space quota parameter for directory. */
+public class StorageSpaceQuotaParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "storagespacequota";
+  /** Default parameter value ({@link Long#MAX_VALUE}). */
+  public static final String DEFAULT = "9223372036854775807";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  public StorageSpaceQuotaParam(final Long value) {
+    super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
+        HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  public StorageSpaceQuotaParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java
new file mode 100644
index 00000000000..42bf6559403
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Storage type parameter. */
+public class StorageTypeParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "storagetype";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  public StorageTypeParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
index 6bc6bcc17dc..9f0d06d7695 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.web.resources.GroupParam;
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.LengthParam;
 import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.NameSpaceQuotaParam;
 import org.apache.hadoop.hdfs.web.resources.NewLengthParam;
 import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
 import org.apache.hadoop.hdfs.web.resources.OffsetParam;
@@ -73,6 +74,8 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
 import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
 import org.apache.hadoop.hdfs.web.resources.StartAfterParam;
 import org.apache.hadoop.hdfs.web.resources.StoragePolicyParam;
+import org.apache.hadoop.hdfs.web.resources.StorageSpaceQuotaParam;
+import org.apache.hadoop.hdfs.web.resources.StorageTypeParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.TokenKindParam;
 import org.apache.hadoop.hdfs.web.resources.TokenServiceParam;
@@ -209,7 +212,10 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
       final CreateFlagParam createFlagParam,
       final NoRedirectParam noredirectParam,
       final StoragePolicyParam policyName,
-      final ECPolicyParam ecpolicy
+      final ECPolicyParam ecpolicy,
+      final NameSpaceQuotaParam namespaceQuota,
+      final StorageSpaceQuotaParam storagespaceQuota,
+      final StorageTypeParam storageType
       ) throws IOException, URISyntaxException {
 
     switch(op.getValue()) {
@@ -261,7 +267,7 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
           accessTime, renameOptions, createParent, delegationTokenArgument,
           aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
           oldSnapshotName, exclDatanodes, createFlagParam, noredirectParam,
-          policyName, ecpolicy);
+          policyName, ecpolicy, namespaceQuota, storagespaceQuota, storageType);
     }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
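The storagetype value travels as a plain string and is converted back to the enum on the namenode with StorageType.parseStorageType(String), as the SETQUOTABYSTORAGETYPE case below shows. An illustrative round trip:

    // Client side: enum -> "storagetype=SSD" query parameter.
    new StorageTypeParam(StorageType.SSD.name());
    // Server side: string -> enum; unknown names raise IllegalArgumentException.
    StorageType t = StorageType.parseStorageType("SSD");
    // supportTypeQuota() is checked client-side; transient types such as
    // RAM_DISK do not support quotas and are rejected.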
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 9ee38cff175..78eba410bd8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -56,6 +56,8 @@ import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.Response.Status;
 
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -509,14 +511,24 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
           .DEFAULT) final StoragePolicyParam policyName,
       @QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam
-          .DEFAULT) final ECPolicyParam ecpolicy
-      ) throws IOException, InterruptedException {
+          .DEFAULT) final ECPolicyParam ecpolicy,
+      @QueryParam(NameSpaceQuotaParam.NAME)
+      @DefaultValue(NameSpaceQuotaParam.DEFAULT)
+          final NameSpaceQuotaParam namespaceQuota,
+      @QueryParam(StorageSpaceQuotaParam.NAME)
+      @DefaultValue(StorageSpaceQuotaParam.DEFAULT)
+          final StorageSpaceQuotaParam storagespaceQuota,
+      @QueryParam(StorageTypeParam.NAME)
+      @DefaultValue(StorageTypeParam.DEFAULT)
+          final StorageTypeParam storageType
+      ) throws IOException, InterruptedException {
     return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
         owner, group, permission, unmaskedPermission, overwrite, bufferSize,
         replication, blockSize, modificationTime, accessTime, renameOptions,
         createParent, delegationTokenArgument, aclPermission, xattrName,
         xattrValue, xattrSetFlag, snapshotName, oldSnapshotName,
-        excludeDatanodes, createFlagParam, noredirect, policyName, ecpolicy);
+        excludeDatanodes, createFlagParam, noredirect, policyName, ecpolicy,
+        namespaceQuota, storagespaceQuota, storageType);
   }
 
   /** Validate all required params. */
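Because every new argument carries a @DefaultValue annotation, a request that omits a quota parameter binds it to the QUOTA_DONT_SET default (Long.MAX_VALUE), so unrelated PUT operations and partial quota updates leave the other limits untouched. For example, setting only the namespace quota is a valid request (placeholders as in the WebHDFS docs below):

    curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTA&namespacequota=100"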
@@ -598,15 +610,23 @@ public class NamenodeWebHdfsMethods {
       @QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
           .DEFAULT) final StoragePolicyParam policyName,
       @QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam.DEFAULT)
-          final ECPolicyParam ecpolicy
+          final ECPolicyParam ecpolicy,
+      @QueryParam(NameSpaceQuotaParam.NAME)
+      @DefaultValue(NameSpaceQuotaParam.DEFAULT)
+          final NameSpaceQuotaParam namespaceQuota,
+      @QueryParam(StorageSpaceQuotaParam.NAME)
+      @DefaultValue(StorageSpaceQuotaParam.DEFAULT)
+          final StorageSpaceQuotaParam storagespaceQuota,
+      @QueryParam(StorageTypeParam.NAME) @DefaultValue(StorageTypeParam.DEFAULT)
+          final StorageTypeParam storageType
       ) throws IOException, InterruptedException {
-
     init(ugi, delegation, username, doAsUser, path, op, destination, owner,
         group, permission, unmaskedPermission, overwrite, bufferSize,
         replication, blockSize, modificationTime, accessTime, renameOptions,
         delegationTokenArgument, aclPermission, xattrName, xattrValue,
         xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes,
-        createFlagParam, noredirect, policyName);
+        createFlagParam, noredirect, policyName, ecpolicy,
+        namespaceQuota, storagespaceQuota, storageType);
 
     return doAs(ugi, new PrivilegedExceptionAction<Response>() {
       @Override
@@ -618,7 +638,8 @@ public class NamenodeWebHdfsMethods {
             renameOptions, createParent, delegationTokenArgument,
             aclPermission, xattrName, xattrValue, xattrSetFlag,
             snapshotName, oldSnapshotName, excludeDatanodes,
-            createFlagParam, noredirect, policyName, ecpolicy);
+            createFlagParam, noredirect, policyName, ecpolicy,
+            namespaceQuota, storagespaceQuota, storageType);
       }
     });
   }
@@ -654,7 +675,10 @@ public class NamenodeWebHdfsMethods {
       final CreateFlagParam createFlagParam,
       final NoRedirectParam noredirectParam,
       final StoragePolicyParam policyName,
-      final ECPolicyParam ecpolicy
+      final ECPolicyParam ecpolicy,
+      final NameSpaceQuotaParam namespaceQuota,
+      final StorageSpaceQuotaParam storagespaceQuota,
+      final StorageTypeParam storageType
       ) throws IOException, URISyntaxException {
     final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
     final ClientProtocol cp = getRpcClientProtocol();
@@ -831,6 +855,17 @@ public class NamenodeWebHdfsMethods {
       validateOpParams(op, ecpolicy);
       cp.setErasureCodingPolicy(fullpath, ecpolicy.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    case SETQUOTA:
+      validateOpParams(op, namespaceQuota, storagespaceQuota);
+      cp.setQuota(fullpath, namespaceQuota.getValue(),
+          storagespaceQuota.getValue(), null);
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    case SETQUOTABYSTORAGETYPE:
+      validateOpParams(op, storagespaceQuota, storageType);
+      cp.setQuota(fullpath, HdfsConstants.QUOTA_DONT_SET,
+          storagespaceQuota.getValue(),
+          StorageType.parseStorageType(storageType.getValue()));
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
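Note that SETQUOTABYSTORAGETYPE reuses the storagespacequota parameter for the per-type limit and pins the namespace quota to QUOTA_DONT_SET, so only the addressed storage type is modified. An illustrative request handled by the case above:

    curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTABYSTORAGETYPE&storagetype=SSD&storagespacequota=1024"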
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 8afb7af933a..9e1b160d6f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -832,6 +832,34 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getConten
 
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage
 
+### Set Quota
+
+* Submit a HTTP PUT request.
+
+        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTA
+                          &namespacequota=<QUOTA>[&storagespacequota=<QUOTA>]"
+
+    The client receives a response with zero content length:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setQuota
+
+### Set Quota By Storage Type
+
+* Submit a HTTP PUT request.
+
+        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTABYSTORAGETYPE
+                          &storagetype=<STORAGETYPE>&storagespacequota=<QUOTA>"
+
+    The client receives a response with zero content length:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setQuotaByStorageType
+
 ### Get File Checksum
 
 * Submit a HTTP GET request.
@@ -3179,6 +3207,42 @@ See also: [Authentication](#Authentication)
 
 See also: [Create and Write to a File](#Create_and_Write_to_a_File)
 
+### Namespace Quota
+
+| Name | `namespacequota` |
+|:---- |:---- |
+| Description | Limit on the namespace usage, i.e., number of files/directories, under a directory. |
+| Type | String |
+| Default Value | Long.MAX_VALUE |
+| Valid Values | \> 0. |
+| Syntax | Any integer. |
+
+See also: [`SETQUOTA`](#Set_Quota)
+
+### Storage Space Quota
+
+| Name | `storagespacequota` |
+|:---- |:---- |
+| Description | Limit on storage space usage (in bytes, including replication) under a directory. |
+| Type | String |
+| Default Value | Long.MAX_VALUE |
+| Valid Values | \> 0. |
+| Syntax | Any integer. |
+
+See also: [`SETQUOTA`](#Set_Quota), [`SETQUOTABYSTORAGETYPE`](#Set_Quota_By_Storage_Type)
+
+### Storage Type
+
+| Name | `storagetype` |
+|:---- |:---- |
+| Description | Storage type of the specific storage type quota to be modified. |
+| Type | String |
+| Default Value | \<empty\> |
+| Valid Values | Any valid storage type. |
+| Syntax | Any string. |
+
+See also: [`SETQUOTABYSTORAGETYPE`](#Set_Quota_By_Storage_Type)
+
 ### Storage Policy
 
 | Name | `storagepolicy` |
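A concrete invocation of the new operation, for illustration only (host name and values are made up; 9870 is the default namenode HTTP port in Hadoop 3):

    curl -i -X PUT "http://namenode.example.com:9870/webhdfs/v1/tmp/quota-dir?op=SETQUOTA&namespacequota=100&storagespacequota=1073741824"

    HTTP/1.1 200 OK
    Content-Length: 0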
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 9d9c905043e..b77c8cbc6d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -55,6 +55,7 @@ import java.util.Random;
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -1129,9 +1130,66 @@ public class TestWebHDFS {
       cluster.shutdown();
     }
   }
-  }
+
+  @Test
+  public void testSetQuota() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/TestDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      final long nsQuota = 100;
+      final long spaceQuota = 1024;
+
+      webHdfs.mkdirs(path);
+
+      webHdfs.setQuota(path, nsQuota, spaceQuota);
+      QuotaUsage quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(nsQuota, quotaUsage.getQuota());
+      assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
+
+      webHdfs.setQuota(path,
+          HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
+      quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getQuota());
+      assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getSpaceQuota());
+
+      webHdfs.setQuotaByStorageType(path, StorageType.DISK, spaceQuota);
+      webHdfs.setQuotaByStorageType(path, StorageType.ARCHIVE, spaceQuota);
+      webHdfs.setQuotaByStorageType(path, StorageType.SSD, spaceQuota);
+      quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.DISK));
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.ARCHIVE));
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.SSD));
+
+      // Test invalid parameters
+
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuota(path, -100, 100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuota(path, 100, -100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, StorageType.SSD, -100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, null, 100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, StorageType.RAM_DISK, 100));
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
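To run just the new test locally, the standard Surefire single-test invocation should work (module directory assumed):

    cd hadoop-hdfs-project/hadoop-hdfs
    mvn test -Dtest=TestWebHDFS#testSetQuota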
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
index c9247dfa7dd..95078a5beee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
@@ -25,6 +26,7 @@ import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
+import org.apache.hadoop.fs.StorageType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -505,6 +507,33 @@ public class TestParam {
     Assert.assertEquals("COLD", p.getValue());
   }
 
+  @Test
+  public void testNamespaceQuotaParam() {
+    NameSpaceQuotaParam p =
+        new NameSpaceQuotaParam(NameSpaceQuotaParam.DEFAULT);
+    assertEquals(Long.valueOf(NameSpaceQuotaParam.DEFAULT), p.getValue());
+    p = new NameSpaceQuotaParam(100L);
+    assertEquals(100L, p.getValue().longValue());
+  }
+
+  @Test
+  public void testStorageSpaceQuotaParam() {
+    StorageSpaceQuotaParam sp = new StorageSpaceQuotaParam(
+        StorageSpaceQuotaParam.DEFAULT);
+    assertEquals(Long.valueOf(StorageSpaceQuotaParam.DEFAULT),
+        sp.getValue());
+    sp = new StorageSpaceQuotaParam(100L);
+    assertEquals(100L, sp.getValue().longValue());
+  }
+
+  @Test
+  public void testStorageTypeParam() {
+    StorageTypeParam p = new StorageTypeParam(StorageTypeParam.DEFAULT);
+    assertNull(p.getValue());
+    p = new StorageTypeParam(StorageType.DISK.name());
+    assertEquals(StorageType.DISK.name(), p.getValue());
+  }
+
   @Test
   public void testECPolicyParam() {
     ECPolicyParam p = new ECPolicyParam(ECPolicyParam.DEFAULT);