diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index ba892edeb36..bac398b3e59 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -1777,6 +1777,33 @@ public QuotaUsage getQuotaUsage(Path f) throws IOException {
     return getContentSummary(f);
   }
 
+  /**
+   * Set quota for the given {@link Path}.
+   *
+   * @param src the target path to set quota for
+   * @param namespaceQuota the namespace quota (i.e., # of files/directories)
+   *                       to set
+   * @param storagespaceQuota the storage space quota to set
+   * @throws IOException IO failure
+   */
+  public void setQuota(Path src, final long namespaceQuota,
+      final long storagespaceQuota) throws IOException {
+    methodNotSupported();
+  }
+
+  /**
+   * Set per storage type quota for the given {@link Path}.
+   *
+   * @param src the target path to set storage type quota for
+   * @param type the storage type to set
+   * @param quota the quota to set for the given storage type
+   * @throws IOException IO failure
+   */
+  public void setQuotaByStorageType(Path src, final StorageType type,
+      final long quota) throws IOException {
+    methodNotSupported();
+  }
+
   /**
    * The default filter accepts all paths.
    */
@@ -4296,6 +4323,22 @@ public FSDataOutputStreamBuilder createFile(Path path) {
         .create().overwrite(true);
   }
 
+  /**
+   * Helper method that throws an {@link UnsupportedOperationException} for the
+   * current {@link FileSystem} method being called.
+   */
+  private void methodNotSupported() {
+    // The order of the stacktrace elements is (from top to bottom):
+    //   - java.lang.Thread.getStackTrace
+    //   - org.apache.hadoop.fs.FileSystem.methodNotSupported
+    //   - <the FileSystem method>
+    // therefore, to find out the current method name, we use the element at
+    // index 2.
+    String name = Thread.currentThread().getStackTrace()[2].getMethodName();
+    throw new UnsupportedOperationException(getClass().getCanonicalName() +
+        " does not support method " + name);
+  }
+
   /**
    * Create a Builder to append a file.
    * @param path file path.
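Aside, not part of the patch: the stack-walking trick that methodNotSupported() relies on can be seen in isolation. A minimal, self-contained sketch; the class and method names below are hypothetical:

```java
// Standalone sketch of the stack-trace indexing used by methodNotSupported():
// element 0 is java.lang.Thread.getStackTrace, element 1 is this helper,
// element 2 is the public method whose name we want to report.
public class StackNameDemo {

  private static void unsupported() {
    String name = Thread.currentThread().getStackTrace()[2].getMethodName();
    throw new UnsupportedOperationException(
        StackNameDemo.class.getCanonicalName()
            + " does not support method " + name);
  }

  public static void setQuota() {
    unsupported();
  }

  public static void main(String[] args) {
    try {
      setQuota();
    } catch (UnsupportedOperationException e) {
      // Prints "... does not support method setQuota"
      System.out.println(e.getMessage());
    }
  }
}
```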
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
index 7c4dfe5d806..c16ea877853 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFilterFileSystem.java
@@ -135,6 +135,8 @@ public Token<?>[] addDelegationTokens(String renewer, Credentials creds)
     public Path fixRelativePart(Path p);
     public ContentSummary getContentSummary(Path f);
     public QuotaUsage getQuotaUsage(Path f);
+    void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
+    void setQuotaByStorageType(Path f, StorageType type, long quota);
     StorageStatistics getStorageStatistics();
   }
 
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
index 025b8314fb0..dadbcb73c56 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystem.java
@@ -116,6 +116,8 @@ public FSDataOutputStream create(Path f, FsPermission permission,
     public void processDeleteOnExit();
     public ContentSummary getContentSummary(Path f);
     public QuotaUsage getQuotaUsage(Path f);
+    void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
+    void setQuotaByStorageType(Path f, StorageType type, long quota);
     public FsStatus getStatus();
     public FileStatus[] listStatus(Path f, PathFilter filter);
     public FileStatus[] listStatusBatch(Path f, byte[] token);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index a99ac4dc0d6..fd986256657 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -996,6 +996,7 @@ public QuotaUsage next(final FileSystem fs, final Path p)
    * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
    *     long, long, StorageType)
    */
+  @Override
   public void setQuota(Path src, final long namespaceQuota,
       final long storagespaceQuota) throws IOException {
     statistics.incrementWriteOps(1);
@@ -1025,6 +1026,7 @@ public Void next(final FileSystem fs, final Path p)
    * @param quota value of the specific storage type quota to be modified.
    *     Maybe {@link HdfsConstants#QUOTA_RESET} to clear quota by storage type.
    */
+  @Override
   public void setQuotaByStorageType(Path src, final StorageType type,
       final long quota) throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 07af7a81f84..e5a7f8b0163 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -77,6 +77,7 @@
 import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
 import org.apache.hadoop.fs.QuotaUsage;
 import org.apache.hadoop.fs.StorageStatistics;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsCreateModes;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
 import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
@@ -1883,6 +1884,48 @@ QuotaUsage decodeResponse(Map<?, ?> json) {
     }.run();
   }
 
+  @Override
+  public void setQuota(Path p, final long namespaceQuota,
+      final long storagespaceQuota) throws IOException {
+    // sanity check
+    if ((namespaceQuota <= 0 &&
+        namespaceQuota != HdfsConstants.QUOTA_RESET) ||
+        (storagespaceQuota < 0 &&
+        storagespaceQuota != HdfsConstants.QUOTA_RESET)) {
+      throw new IllegalArgumentException("Invalid values for quota : " +
+          namespaceQuota + " and " + storagespaceQuota);
+    }
+
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_QUOTA_USAGE);
+
+    final HttpOpParam.Op op = PutOpParam.Op.SETQUOTA;
+    new FsPathRunner(op, p, new NameSpaceQuotaParam(namespaceQuota),
+        new StorageSpaceQuotaParam(storagespaceQuota)).run();
+  }
+
+  @Override
+  public void setQuotaByStorageType(Path path, StorageType type, long quota)
+      throws IOException {
+    if (quota <= 0 && quota != HdfsConstants.QUOTA_RESET) {
+      throw new IllegalArgumentException("Invalid values for quota :" + quota);
+    }
+    if (type == null) {
+      throw new IllegalArgumentException("Invalid storage type (null)");
+    }
+    if (!type.supportTypeQuota()) {
+      throw new IllegalArgumentException(
+          "Quota for storage type '" + type.toString() + "' is not supported");
+    }
+
+    statistics.incrementWriteOps(1);
+    storageStatistics.incrementOpCounter(OpType.SET_QUOTA_BYTSTORAGEYPE);
+
+    final HttpOpParam.Op op = PutOpParam.Op.SETQUOTABYSTORAGETYPE;
+    new FsPathRunner(op, path, new StorageTypeParam(type.name()),
+        new StorageSpaceQuotaParam(quota)).run();
+  }
+
   @Override
   public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
   ) throws IOException {
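A rough sketch of how the new client-side surface is meant to be used once this patch is applied; the endpoint URI and path below are made up, and any FileSystem implementation that overrides the new methods would work the same way:

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;

public class QuotaClientDemo {
  public static void main(String[] args) throws Exception {
    // Hypothetical WebHDFS endpoint; requires a reachable cluster.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://nn.example.com:9870"), new Configuration());
    Path dir = new Path("/TestDir");

    fs.setQuota(dir, 100, 1024);                        // 100 names, 1024 bytes
    fs.setQuotaByStorageType(dir, StorageType.SSD, 1024);
    fs.setQuota(dir, HdfsConstants.QUOTA_RESET,         // clear both quotas
        HdfsConstants.QUOTA_RESET);
    fs.close();
  }
}
```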
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java
new file mode 100644
index 00000000000..d85022194b8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/NameSpaceQuotaParam.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
+/** The name space quota parameter for directory. */
+public class NameSpaceQuotaParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "namespacequota";
+  /** Default parameter value ({@link Long#MAX_VALUE}). */
+  public static final String DEFAULT = "9223372036854775807";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  public NameSpaceQuotaParam(final Long value) {
+    super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
+        HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  public NameSpaceQuotaParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
index 75b1899668a..134ccdcf30f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
@@ -56,6 +56,9 @@ public enum Op implements HttpOpParam.Op {
     RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
     SETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
 
+    SETQUOTA(false, HttpURLConnection.HTTP_OK),
+    SETQUOTABYSTORAGETYPE(false, HttpURLConnection.HTTP_OK),
+
     NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
 
     final boolean doOutputAndRedirect;
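One detail behind both new LongParam subclasses: their DEFAULT string is Long.MAX_VALUE, which is assumed here to match HdfsConstants.QUOTA_DONT_SET, so an omitted query parameter leaves the corresponding quota unchanged, while -1 (QUOTA_RESET) clears it. A standalone sketch of that convention, with the two constant values mirrored locally as an assumption:

```java
// Standalone sketch; the constants are assumed to mirror
// org.apache.hadoop.hdfs.protocol.HdfsConstants.
public class QuotaSentinels {
  static final long QUOTA_DONT_SET = Long.MAX_VALUE; // "leave unchanged"
  static final long QUOTA_RESET = -1L;               // "clear the quota"

  public static void main(String[] args) {
    // The params' DEFAULT string is exactly Long.MAX_VALUE:
    System.out.println(
        Long.parseLong("9223372036854775807") == QUOTA_DONT_SET); // true
    System.out.println(QUOTA_RESET);                              // -1
  }
}
```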
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java
new file mode 100644
index 00000000000..65b18bdb90b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageSpaceQuotaParam.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+
+/** The storage space quota parameter for directory. */
+public class StorageSpaceQuotaParam extends LongParam {
+  /** Parameter name. */
+  public static final String NAME = "storagespacequota";
+  /** Default parameter value ({@link Long#MAX_VALUE}). */
+  public static final String DEFAULT = "9223372036854775807";
+
+  private static final Domain DOMAIN = new Domain(NAME);
+
+  public StorageSpaceQuotaParam(final Long value) {
+    super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
+        HdfsConstants.QUOTA_DONT_SET);
+  }
+
+  public StorageSpaceQuotaParam(final String str) {
+    this(DOMAIN.parse(str));
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
+
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java
new file mode 100644
index 00000000000..42bf6559403
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/web/resources/StorageTypeParam.java
@@ -0,0 +1,37 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+/** Storage type parameter. */
+public class StorageTypeParam extends StringParam {
+  /** Parameter name. */
+  public static final String NAME = "storagetype";
+  /** Default parameter value. */
+  public static final String DEFAULT = "";
+
+  private static final Domain DOMAIN = new Domain(NAME, null);
+
+  public StorageTypeParam(final String str) {
+    super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
+  }
+
+  @Override
+  public String getName() {
+    return NAME;
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
index 977341e10da..f28ccb8355f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterWebHdfsMethods.java
@@ -62,6 +62,7 @@
 import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.LengthParam;
 import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
+import org.apache.hadoop.hdfs.web.resources.NameSpaceQuotaParam;
 import org.apache.hadoop.hdfs.web.resources.NewLengthParam;
 import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
 import org.apache.hadoop.hdfs.web.resources.OffsetParam;
@@ -78,6 +79,8 @@
 import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
 import org.apache.hadoop.hdfs.web.resources.StartAfterParam;
 import org.apache.hadoop.hdfs.web.resources.StoragePolicyParam;
+import org.apache.hadoop.hdfs.web.resources.StorageSpaceQuotaParam;
+import org.apache.hadoop.hdfs.web.resources.StorageTypeParam;
 import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
 import org.apache.hadoop.hdfs.web.resources.TokenKindParam;
 import org.apache.hadoop.hdfs.web.resources.TokenServiceParam;
@@ -217,7 +220,10 @@ protected Response put(
       final CreateFlagParam createFlagParam,
       final NoRedirectParam noredirectParam,
       final StoragePolicyParam policyName,
-      final ECPolicyParam ecpolicy
+      final ECPolicyParam ecpolicy,
+      final NameSpaceQuotaParam namespaceQuota,
+      final StorageSpaceQuotaParam storagespaceQuota,
+      final StorageTypeParam storageType
       ) throws IOException, URISyntaxException {
 
     switch(op.getValue()) {
@@ -264,7 +270,7 @@ protected Response put(
           accessTime, renameOptions, createParent, delegationTokenArgument,
           aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
           oldSnapshotName, exclDatanodes, createFlagParam, noredirectParam,
-          policyName, ecpolicy);
+          policyName, ecpolicy, namespaceQuota, storagespaceQuota, storageType);
     }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index c5c1e40b1b4..dc043935ff8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -56,6 +56,8 @@
 import javax.ws.rs.core.Response.Status;
 
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -503,14 +505,24 @@ public Response putRoot(
       @QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
           .DEFAULT) final StoragePolicyParam policyName,
       @QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam
-          .DEFAULT) final ECPolicyParam ecpolicy
-      ) throws IOException, InterruptedException {
+          .DEFAULT) final ECPolicyParam ecpolicy,
+      @QueryParam(NameSpaceQuotaParam.NAME)
+      @DefaultValue(NameSpaceQuotaParam.DEFAULT)
+          final NameSpaceQuotaParam namespaceQuota,
+      @QueryParam(StorageSpaceQuotaParam.NAME)
+      @DefaultValue(StorageSpaceQuotaParam.DEFAULT)
+          final StorageSpaceQuotaParam storagespaceQuota,
+      @QueryParam(StorageTypeParam.NAME)
+      @DefaultValue(StorageTypeParam.DEFAULT)
+          final StorageTypeParam storageType
+      ) throws IOException, InterruptedException {
     return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
         owner, group, permission, unmaskedPermission, overwrite, bufferSize,
         replication, blockSize, modificationTime, accessTime, renameOptions,
         createParent, delegationTokenArgument, aclPermission, xattrName,
         xattrValue, xattrSetFlag, snapshotName, oldSnapshotName,
-        excludeDatanodes, createFlagParam, noredirect, policyName, ecpolicy);
+        excludeDatanodes, createFlagParam, noredirect, policyName, ecpolicy,
+        namespaceQuota, storagespaceQuota, storageType);
   }
 
   /** Validate all required params.
    */
@@ -592,15 +604,23 @@ public Response put(
       @QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
           .DEFAULT) final StoragePolicyParam policyName,
       @QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam.DEFAULT)
-          final ECPolicyParam ecpolicy
+          final ECPolicyParam ecpolicy,
+      @QueryParam(NameSpaceQuotaParam.NAME)
+      @DefaultValue(NameSpaceQuotaParam.DEFAULT)
+          final NameSpaceQuotaParam namespaceQuota,
+      @QueryParam(StorageSpaceQuotaParam.NAME)
+      @DefaultValue(StorageSpaceQuotaParam.DEFAULT)
+          final StorageSpaceQuotaParam storagespaceQuota,
+      @QueryParam(StorageTypeParam.NAME) @DefaultValue(StorageTypeParam.DEFAULT)
+          final StorageTypeParam storageType
       ) throws IOException, InterruptedException {
-
     init(ugi, delegation, username, doAsUser, path, op, destination, owner,
         group, permission, unmaskedPermission, overwrite, bufferSize,
         replication, blockSize, modificationTime, accessTime, renameOptions,
         delegationTokenArgument, aclPermission, xattrName, xattrValue,
         xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes,
-        createFlagParam, noredirect, policyName);
+        createFlagParam, noredirect, policyName, ecpolicy,
+        namespaceQuota, storagespaceQuota, storageType);
 
     return doAs(ugi, new PrivilegedExceptionAction<Response>() {
       @Override
@@ -612,7 +632,8 @@ public Response run() throws IOException, URISyntaxException {
           renameOptions, createParent, delegationTokenArgument,
           aclPermission, xattrName, xattrValue, xattrSetFlag,
           snapshotName, oldSnapshotName, excludeDatanodes,
-          createFlagParam, noredirect, policyName, ecpolicy);
+          createFlagParam, noredirect, policyName, ecpolicy,
+          namespaceQuota, storagespaceQuota, storageType);
       }
     });
   }
@@ -648,7 +669,10 @@ protected Response put(
       final CreateFlagParam createFlagParam,
       final NoRedirectParam noredirectParam,
       final StoragePolicyParam policyName,
-      final ECPolicyParam ecpolicy
+      final ECPolicyParam ecpolicy,
+      final NameSpaceQuotaParam namespaceQuota,
+      final StorageSpaceQuotaParam storagespaceQuota,
+      final StorageTypeParam storageType
       ) throws IOException, URISyntaxException {
     final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
     final ClientProtocol cp = getRpcClientProtocol();
@@ -821,6 +845,17 @@ protected Response put(
       validateOpParams(op, ecpolicy);
       cp.setErasureCodingPolicy(fullpath, ecpolicy.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    case SETQUOTA:
+      validateOpParams(op, namespaceQuota, storagespaceQuota);
+      cp.setQuota(fullpath, namespaceQuota.getValue(),
+          storagespaceQuota.getValue(), null);
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    case SETQUOTABYSTORAGETYPE:
+      validateOpParams(op, storagespaceQuota, storageType);
+      cp.setQuota(fullpath, HdfsConstants.QUOTA_DONT_SET,
+          storagespaceQuota.getValue(),
+          StorageType.parseStorageType(storageType.getValue()));
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
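Before the documentation changes below, a hypothetical smoke test of the new REST op over plain JDK HTTP. The endpoint, path, and quota values are made up; the cluster is assumed to be unsecured (no SPNEGO), and setting quotas typically requires administrator privileges:

```java
import java.net.HttpURLConnection;
import java.net.URL;

public class RawSetQuotaDemo {
  public static void main(String[] args) throws Exception {
    // Query parameter names follow NameSpaceQuotaParam/StorageSpaceQuotaParam.
    URL url = new URL("http://nn.example.com:9870/webhdfs/v1/TestDir"
        + "?op=SETQUOTA&namespacequota=100&storagespacequota=1024");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");
    // Expect "200 OK" with Content-Length: 0 on success.
    System.out.println(conn.getResponseCode() + " " + conn.getResponseMessage());
    conn.disconnect();
  }
}
```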
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
index 3e051c27a79..a7ae269bfa0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/WebHDFS.md
@@ -818,6 +818,34 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getConten
 
 See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage
 
+### Set Quota
+
+* Submit a HTTP PUT request.
+
+        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTA
+                              &namespacequota=<QUOTA>[&storagespacequota=<QUOTA>]"
+
+    The client receives a response with zero content length:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setQuota
+
+### Set Quota By Storage Type
+
+* Submit a HTTP PUT request.
+
+        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTABYSTORAGETYPE
+                              &storagetype=<STORAGETYPE>&storagespacequota=<QUOTA>"
+
+    The client receives a response with zero content length:
+
+        HTTP/1.1 200 OK
+        Content-Length: 0
+
+See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setQuotaByStorageType
+
 ### Get File Checksum
 
 * Submit a HTTP GET request.
@@ -3130,6 +3158,42 @@ See also: [Authentication](#Authentication)
 
 See also: [Create and Write to a File](#Create_and_Write_to_a_File)
 
+### Namespace Quota
+
+| Name | `namespacequota` |
+|:---- |:---- |
+| Description | Limit on the namespace usage, i.e., the number of files/directories, under a directory. |
+| Type | long |
+| Default Value | Long.MAX_VALUE |
+| Valid Values | \> 0, or -1 to reset the quota. |
+| Syntax | Any integer. |
+
+See also: [`SETQUOTA`](#Set_Quota)
+
+### Storage Space Quota
+
+| Name | `storagespacequota` |
+|:---- |:---- |
+| Description | Limit on storage space usage (in bytes, including replication) under a directory. |
+| Type | long |
+| Default Value | Long.MAX_VALUE |
+| Valid Values | \>= 0, or -1 to reset the quota. |
+| Syntax | Any integer. |
+
+See also: [`SETQUOTA`](#Set_Quota), [`SETQUOTABYSTORAGETYPE`](#Set_Quota_By_Storage_Type)
+
+### Storage Type
+
+| Name | `storagetype` |
+|:---- |:---- |
+| Description | Storage type of the specific storage type quota to be modified. |
+| Type | String |
+| Default Value | \<empty\> |
+| Valid Values | Any valid storage type. |
+| Syntax | Any string. |
+
+See also: [`SETQUOTABYSTORAGETYPE`](#Set_Quota_By_Storage_Type)
+
 ### Storage Policy
 
 | Name | `storagepolicy` |
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
index 0261a59c99d..8c1ad7314b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java
@@ -57,6 +57,7 @@
 import com.google.common.collect.ImmutableList;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.fs.QuotaUsage;
+import org.apache.hadoop.test.LambdaTestUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -1131,9 +1132,66 @@ public void testQuotaUsage() throws Exception {
         cluster.shutdown();
       }
     }
-
   }
 
+  @Test
+  public void testSetQuota() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/TestDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+
+      final long nsQuota = 100;
+      final long spaceQuota = 1024;
+
+      webHdfs.mkdirs(path);
+
+      webHdfs.setQuota(path, nsQuota, spaceQuota);
+      QuotaUsage quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(nsQuota, quotaUsage.getQuota());
+      assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
+
+      webHdfs.setQuota(path,
+          HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
+      quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getQuota());
+      assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getSpaceQuota());
+
+      webHdfs.setQuotaByStorageType(path, StorageType.DISK, spaceQuota);
+      webHdfs.setQuotaByStorageType(path, StorageType.ARCHIVE, spaceQuota);
+      webHdfs.setQuotaByStorageType(path, StorageType.SSD, spaceQuota);
+      quotaUsage = dfs.getQuotaUsage(path);
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.DISK));
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.ARCHIVE));
+      assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.SSD));
+
+      // Test invalid parameters
+
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuota(path, -100, 100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuota(path, 100, -100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, StorageType.SSD, -100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, null, 100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, StorageType.SSD, -100));
+      LambdaTestUtils.intercept(IllegalArgumentException.class,
+          () -> webHdfs.setQuotaByStorageType(path, StorageType.RAM_DISK, 100));
+
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
+
   @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
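The RAM_DISK case in the test above is expected to fail because transient storage does not support type quotas. A simplified stand-in for the relevant StorageType logic, written here as an assumption rather than the real implementation:

```java
// Simplified stand-in for org.apache.hadoop.fs.StorageType: transient media
// (RAM_DISK) are assumed to be excluded from per-type quotas.
enum MiniStorageType {
  DISK(false), SSD(false), ARCHIVE(false), RAM_DISK(true);

  private final boolean isTransient;

  MiniStorageType(boolean isTransient) {
    this.isTransient = isTransient;
  }

  boolean supportTypeQuota() {
    return !isTransient;
  }
}

class QuotaTypeCheckDemo {
  public static void main(String[] args) {
    System.out.println(MiniStorageType.SSD.supportTypeQuota());      // true
    System.out.println(MiniStorageType.RAM_DISK.supportTypeQuota()); // false
  }
}
```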
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
index c9247dfa7dd..95078a5beee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web.resources;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 
@@ -25,6 +26,7 @@
 import java.util.EnumSet;
 import java.util.List;
 
+import org.apache.hadoop.fs.StorageType;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
@@ -505,6 +507,33 @@ public void testStoragePolicyParam() {
     Assert.assertEquals("COLD", p.getValue());
   }
 
+  @Test
+  public void testNamespaceQuotaParam() {
+    NameSpaceQuotaParam p =
+        new NameSpaceQuotaParam(NameSpaceQuotaParam.DEFAULT);
+    assertEquals(Long.valueOf(NameSpaceQuotaParam.DEFAULT), p.getValue());
+    p = new NameSpaceQuotaParam(100L);
+    assertEquals(100L, p.getValue().longValue());
+  }
+
+  @Test
+  public void testStorageSpaceQuotaParam() {
+    StorageSpaceQuotaParam sp = new StorageSpaceQuotaParam(
+        StorageSpaceQuotaParam.DEFAULT);
+    assertEquals(Long.valueOf(StorageSpaceQuotaParam.DEFAULT),
+        sp.getValue());
+    sp = new StorageSpaceQuotaParam(100L);
+    assertEquals(100L, sp.getValue().longValue());
+  }
+
+  @Test
+  public void testStorageTypeParam() {
+    StorageTypeParam p = new StorageTypeParam(StorageTypeParam.DEFAULT);
+    assertNull(p.getValue());
+    p = new StorageTypeParam(StorageType.DISK.name());
+    assertEquals(StorageType.DISK.name(), p.getValue());
+  }
+
   @Test
   public void testECPolicyParam() {
     ECPolicyParam p = new ECPolicyParam(ECPolicyParam.DEFAULT);