HDFS-8631. WebHDFS : Support setQuota. Contributed by Chao Sun.

Surendra Singh Lilhore 2019-08-28 23:58:23 +05:30
parent 48cb583906
commit 29bd6f3fc3
14 changed files with 424 additions and 11 deletions

View File

@ -1788,6 +1788,33 @@ public abstract class FileSystem extends Configured
return getContentSummary(f);
}
/**
* Set quota for the given {@link Path}.
*
* @param src the target path to set quota for
* @param namespaceQuota the namespace quota (i.e., # of files/directories)
* to set
* @param storagespaceQuota the storage space quota to set
* @throws IOException IO failure
*/
public void setQuota(Path src, final long namespaceQuota,
final long storagespaceQuota) throws IOException {
methodNotSupported();
}
/**
* Set per storage type quota for the given {@link Path}.
*
* @param src the target path to set storage type quota for
* @param type the storage type to set
* @param quota the quota to set for the given storage type
* @throws IOException IO failure
*/
public void setQuotaByStorageType(Path src, final StorageType type,
final long quota) throws IOException {
methodNotSupported();
}
/**
* The default filter accepts all paths.
*/
@ -4455,6 +4482,22 @@ public abstract class FileSystem extends Configured
return result;
}
/**
* Helper method that throws an {@link UnsupportedOperationException} for the
* current {@link FileSystem} method being called.
*/
private void methodNotSupported() {
// The order of the stacktrace elements is (from top to bottom):
// - java.lang.Thread.getStackTrace
// - org.apache.hadoop.fs.FileSystem.methodNotSupported
// - <the FileSystem method>
// therefore, to find out the current method name, we use the element at
// index 2.
String name = Thread.currentThread().getStackTrace()[2].getMethodName();
throw new UnsupportedOperationException(getClass().getCanonicalName() +
" does not support method " + name);
}
/**
* Create instance of the standard {@link FSDataInputStreamBuilder} for the
* given filesystem and path.
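For orientation, a minimal usage sketch of the new base-class API (the configuration, path, and quota values below are illustrative, not part of the patch). Implementations that do not override the methods fail fast through methodNotSupported():

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class QuotaExample {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        // Assumes fs.defaultFS points at a quota-aware filesystem
        // (HDFS or WebHDFS); otherwise the call below throws.
        FileSystem fs = FileSystem.get(conf);
        Path dir = new Path("/TestDir");
        try {
          // 100 names (files + directories), 1024 bytes of storage space.
          fs.setQuota(dir, 100L, 1024L);
        } catch (UnsupportedOperationException e) {
          // Raised by FileSystem#methodNotSupported for implementations
          // (e.g., the local filesystem) that do not override setQuota.
          System.err.println(e.getMessage());
        }
      }
    }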

View File

@ -135,6 +135,8 @@ public class TestFilterFileSystem {
public Path fixRelativePart(Path p);
public ContentSummary getContentSummary(Path f);
public QuotaUsage getQuotaUsage(Path f);
void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
void setQuotaByStorageType(Path f, StorageType type, long quota);
StorageStatistics getStorageStatistics();
}

View File

@ -118,6 +118,8 @@ public class TestHarFileSystem {
public void processDeleteOnExit();
public ContentSummary getContentSummary(Path f);
public QuotaUsage getQuotaUsage(Path f);
void setQuota(Path f, long namespaceQuota, long storagespaceQuota);
void setQuotaByStorageType(Path f, StorageType type, long quota);
public FsStatus getStatus();
public FileStatus[] listStatus(Path f, PathFilter filter);
public FileStatus[] listStatusBatch(Path f, byte[] token);

View File

@ -1000,6 +1000,7 @@ public class DistributedFileSystem extends FileSystem
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
* long, long, StorageType)
*/
@Override
public void setQuota(Path src, final long namespaceQuota,
final long storagespaceQuota) throws IOException {
statistics.incrementWriteOps(1);
@ -1029,6 +1030,7 @@ public class DistributedFileSystem extends FileSystem
* @param quota value of the specific storage type quota to be modified.
* Maybe {@link HdfsConstants#QUOTA_RESET} to clear quota by storage type.
*/
@Override
public void setQuotaByStorageType(Path src, final StorageType type,
final long quota)
throws IOException {

View File

@ -75,6 +75,7 @@ import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsCreateModes;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics.OpType;
@ -1873,6 +1874,48 @@ public class WebHdfsFileSystem extends FileSystem
}.run();
}
@Override
public void setQuota(Path p, final long namespaceQuota,
final long storagespaceQuota) throws IOException {
// sanity check
if ((namespaceQuota <= 0 &&
namespaceQuota != HdfsConstants.QUOTA_RESET) ||
(storagespaceQuota < 0 &&
storagespaceQuota != HdfsConstants.QUOTA_RESET)) {
throw new IllegalArgumentException("Invalid values for quota : " +
namespaceQuota + " and " + storagespaceQuota);
}
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.SET_QUOTA_USAGE);
final HttpOpParam.Op op = PutOpParam.Op.SETQUOTA;
new FsPathRunner(op, p, new NameSpaceQuotaParam(namespaceQuota),
new StorageSpaceQuotaParam(storagespaceQuota)).run();
}
@Override
public void setQuotaByStorageType(Path path, StorageType type, long quota)
throws IOException {
if (quota <= 0 && quota != HdfsConstants.QUOTA_RESET) {
throw new IllegalArgumentException("Invalid values for quota :" + quota);
}
if (type == null) {
throw new IllegalArgumentException("Invalid storage type (null)");
}
if (!type.supportTypeQuota()) {
throw new IllegalArgumentException(
"Quota for storage type '" + type.toString() + "' is not supported");
}
statistics.incrementWriteOps(1);
storageStatistics.incrementOpCounter(OpType.SET_QUOTA_BYTSTORAGEYPE);
final HttpOpParam.Op op = PutOpParam.Op.SETQUOTABYSTORAGETYPE;
new FsPathRunner(op, path, new StorageTypeParam(type.name()),
new StorageSpaceQuotaParam(quota)).run();
}
@Override
public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
) throws IOException {
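To make the client-side checks concrete, a short sketch of which argument combinations pass (webHdfs and dir stand for an existing WebHdfsFileSystem instance and target directory; the values are illustrative):

    webHdfs.setQuota(dir, 100L, 1024L);               // ok: positive quotas
    webHdfs.setQuota(dir, HdfsConstants.QUOTA_RESET,
        HdfsConstants.QUOTA_RESET);                   // ok: -1 clears both quotas
    webHdfs.setQuota(dir, 0L, 1024L);                 // IllegalArgumentException:
                                                      // namespace quota must be > 0
    webHdfs.setQuotaByStorageType(dir, StorageType.SSD, 1024L);      // ok
    webHdfs.setQuotaByStorageType(dir, StorageType.RAM_DISK, 1024L);
    // IllegalArgumentException: RAM_DISK is transient, so
    // StorageType#supportTypeQuota() returns false for it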

View File

@ -0,0 +1,44 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
/** The name space quota parameter for directory. */
public class NameSpaceQuotaParam extends LongParam {
/** Parameter name. */
public static final String NAME = "namespacequota";
/** Default parameter value ({@link Long#MAX_VALUE}). */
public static final String DEFAULT = "9223372036854775807";
private static final Domain DOMAIN = new Domain(NAME);
public NameSpaceQuotaParam(final Long value) {
super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
HdfsConstants.QUOTA_DONT_SET);
}
public NameSpaceQuotaParam(final String str) {
this(DOMAIN.parse(str));
}
@Override
public String getName() {
return NAME;
}
}

View File

@ -57,6 +57,9 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
SETSTORAGEPOLICY(false, HttpURLConnection.HTTP_OK),
SETQUOTA(false, HttpURLConnection.HTTP_OK),
SETQUOTABYSTORAGETYPE(false, HttpURLConnection.HTTP_OK),
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final boolean doOutputAndRedirect;

View File

@ -0,0 +1,45 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
/** The storage space quota parameter for directory. */
public class StorageSpaceQuotaParam extends LongParam {
/** Parameter name. */
public static final String NAME = "storagespacequota";
/** Default parameter value ({@link Long#MAX_VALUE}). */
public static final String DEFAULT = "9223372036854775807";
private static final Domain DOMAIN = new Domain(NAME);
public StorageSpaceQuotaParam(final Long value) {
super(DOMAIN, value, HdfsConstants.QUOTA_RESET,
HdfsConstants.QUOTA_DONT_SET);
}
public StorageSpaceQuotaParam(final String str) {
this(DOMAIN.parse(str));
}
@Override
public String getName() {
return NAME;
}
}

View File

@ -0,0 +1,37 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
* <p>
* http://www.apache.org/licenses/LICENSE-2.0
* <p>
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
/** storage type parameter. */
public class StorageTypeParam extends StringParam {
/** Parameter name. */
public static final String NAME = "storagetype";
/** Default parameter value. */
public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null);
public StorageTypeParam(final String str) {
super(DOMAIN, str == null || str.equals(DEFAULT) ? null : str);
}
@Override
public String getName() {
return NAME;
}
}

View File

@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
import org.apache.hadoop.hdfs.web.resources.NameSpaceQuotaParam;
import org.apache.hadoop.hdfs.web.resources.NewLengthParam;
import org.apache.hadoop.hdfs.web.resources.NoRedirectParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
@ -73,6 +74,8 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
import org.apache.hadoop.hdfs.web.resources.StartAfterParam;
import org.apache.hadoop.hdfs.web.resources.StoragePolicyParam;
import org.apache.hadoop.hdfs.web.resources.StorageSpaceQuotaParam;
import org.apache.hadoop.hdfs.web.resources.StorageTypeParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.TokenKindParam;
import org.apache.hadoop.hdfs.web.resources.TokenServiceParam;
@ -209,7 +212,10 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
final CreateFlagParam createFlagParam,
final NoRedirectParam noredirectParam,
final StoragePolicyParam policyName,
final ECPolicyParam ecpolicy,
final NameSpaceQuotaParam namespaceQuota,
final StorageSpaceQuotaParam storagespaceQuota,
final StorageTypeParam storageType
) throws IOException, URISyntaxException {
switch(op.getValue()) {
@ -261,7 +267,7 @@ public class RouterWebHdfsMethods extends NamenodeWebHdfsMethods {
accessTime, renameOptions, createParent, delegationTokenArgument,
aclPermission, xattrName, xattrValue, xattrSetFlag, snapshotName,
oldSnapshotName, exclDatanodes, createFlagParam, noredirectParam,
policyName, ecpolicy, namespaceQuota, storagespaceQuota, storageType);
}
default:
throw new UnsupportedOperationException(op + " is not supported");

View File

@ -56,6 +56,8 @@ import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.Response.Status;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@ -509,14 +511,24 @@ public class NamenodeWebHdfsMethods {
@QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
.DEFAULT) final StoragePolicyParam policyName,
@QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam
.DEFAULT) final ECPolicyParam ecpolicy,
@QueryParam(NameSpaceQuotaParam.NAME)
@DefaultValue(NameSpaceQuotaParam.DEFAULT)
final NameSpaceQuotaParam namespaceQuota,
@QueryParam(StorageSpaceQuotaParam.NAME)
@DefaultValue(StorageSpaceQuotaParam.DEFAULT)
final StorageSpaceQuotaParam storagespaceQuota,
@QueryParam(StorageTypeParam.NAME)
@DefaultValue(StorageTypeParam.DEFAULT)
final StorageTypeParam storageType
) throws IOException, InterruptedException {
return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
owner, group, permission, unmaskedPermission, overwrite, bufferSize,
replication, blockSize, modificationTime, accessTime, renameOptions,
createParent, delegationTokenArgument, aclPermission, xattrName,
xattrValue, xattrSetFlag, snapshotName, oldSnapshotName,
excludeDatanodes, createFlagParam, noredirect, policyName, ecpolicy,
namespaceQuota, storagespaceQuota, storageType);
}
/** Validate all required params. */
@ -598,15 +610,23 @@ public class NamenodeWebHdfsMethods {
@QueryParam(StoragePolicyParam.NAME) @DefaultValue(StoragePolicyParam
.DEFAULT) final StoragePolicyParam policyName,
@QueryParam(ECPolicyParam.NAME) @DefaultValue(ECPolicyParam.DEFAULT)
final ECPolicyParam ecpolicy,
@QueryParam(NameSpaceQuotaParam.NAME)
@DefaultValue(NameSpaceQuotaParam.DEFAULT)
final NameSpaceQuotaParam namespaceQuota,
@QueryParam(StorageSpaceQuotaParam.NAME)
@DefaultValue(StorageSpaceQuotaParam.DEFAULT)
final StorageSpaceQuotaParam storagespaceQuota,
@QueryParam(StorageTypeParam.NAME) @DefaultValue(StorageTypeParam.DEFAULT)
final StorageTypeParam storageType
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, destination, owner,
group, permission, unmaskedPermission, overwrite, bufferSize,
replication, blockSize, modificationTime, accessTime, renameOptions,
delegationTokenArgument, aclPermission, xattrName, xattrValue,
xattrSetFlag, snapshotName, oldSnapshotName, excludeDatanodes,
createFlagParam, noredirect, policyName, ecpolicy,
namespaceQuota, storagespaceQuota, storageType);
return doAs(ugi, new PrivilegedExceptionAction<Response>() {
@Override
@ -618,7 +638,8 @@ public class NamenodeWebHdfsMethods {
renameOptions, createParent, delegationTokenArgument,
aclPermission, xattrName, xattrValue, xattrSetFlag,
snapshotName, oldSnapshotName, excludeDatanodes,
createFlagParam, noredirect, policyName, ecpolicy,
namespaceQuota, storagespaceQuota, storageType);
}
});
}
@ -654,7 +675,10 @@ public class NamenodeWebHdfsMethods {
final CreateFlagParam createFlagParam,
final NoRedirectParam noredirectParam,
final StoragePolicyParam policyName,
final ECPolicyParam ecpolicy,
final NameSpaceQuotaParam namespaceQuota,
final StorageSpaceQuotaParam storagespaceQuota,
final StorageTypeParam storageType
) throws IOException, URISyntaxException {
final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
final ClientProtocol cp = getRpcClientProtocol();
@ -831,6 +855,17 @@ public class NamenodeWebHdfsMethods {
validateOpParams(op, ecpolicy);
cp.setErasureCodingPolicy(fullpath, ecpolicy.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
case SETQUOTA:
validateOpParams(op, namespaceQuota, storagespaceQuota);
cp.setQuota(fullpath, namespaceQuota.getValue(),
storagespaceQuota.getValue(), null);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
case SETQUOTABYSTORAGETYPE:
validateOpParams(op, storagespaceQuota, storageType);
cp.setQuota(fullpath, HdfsConstants.QUOTA_DONT_SET,
storagespaceQuota.getValue(),
StorageType.parseStorageType(storageType.getValue()));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
default:
throw new UnsupportedOperationException(op + " is not supported");
}

View File

@ -832,6 +832,34 @@ See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getConten
See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage
### Set Quota
* Submit an HTTP PUT request.
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTA
&namespacequota=<QUOTA>[&storagespacequota=<QUOTA>]"
The client receives a response with zero content length:
HTTP/1.1 200 OK
Content-Length: 0
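For example, to set a namespace quota of 100 and a storage space quota of 1024 bytes on /TestDir (host, port, and values are illustrative):

        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/TestDir?op=SETQUOTA&namespacequota=100&storagespacequota=1024"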
See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setQuota
### Set Quota By Storage Type
* Submit an HTTP PUT request.
curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=SETQUOTABYSTORAGETYPE
&storagetype=<STORAGETYPE>&storagespacequota=<QUOTA>"
The client receives a response with zero content length:
HTTP/1.1 200 OK
Content-Length: 0
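For example, to limit SSD usage under /TestDir to 1024 bytes (host, port, and values are illustrative):

        curl -i -X PUT "http://<HOST>:<PORT>/webhdfs/v1/TestDir?op=SETQUOTABYSTORAGETYPE&storagetype=SSD&storagespacequota=1024"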
See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).setQuotaByStorageType
### Get File Checksum
* Submit an HTTP GET request.
@ -3179,6 +3207,42 @@ See also: [Authentication](#Authentication)
See also: [Create and Write to a File](#Create_and_Write_to_a_File)
### Namespace Quota
| Name | `namespacequota` |
|:---- |:---- |
| Description | Limit on the namespace usage, i.e., number of files/directories, under a directory. |
| Type | long |
| Default Value | Long.MAX_VALUE |
| Valid Values | \> 0, or -1 to reset the quota. |
| Syntax | Any integer. |
See also: [`SETQUOTA`](#Set_Quota)
### Storage Space Quota
| Name | `storagespacequota` |
|:---- |:---- |
| Description | Limit on storage space usage (in bytes, including replication) under a directory. |
| Type | long |
| Default Value | Long.MAX_VALUE |
| Valid Values | \>= 0, or -1 to reset the quota. |
| Syntax | Any integer. |
See also: [`SETQUOTA`](#Set_Quota), [`SETQUOTABYSTORAGETYPE`](#Set_Quota_By_Storage_Type)
### Storage Type
| Name | `storagetype` |
|:---- |:---- |
| Description | The storage type whose quota is to be modified. |
| Type | String |
| Default Value | \<empty\> |
| Valid Values | Any storage type that supports type quotas, e.g. DISK, SSD, ARCHIVE. |
| Syntax | Any string. |
See also: [`SETQUOTABYSTORAGETYPE`](#Set_Quota_By_Storage_Type)
### Storage Policy
| Name | `storagepolicy` |

View File

@ -55,6 +55,7 @@ import java.util.Random;
import com.google.common.collect.ImmutableList;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.test.LambdaTestUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@ -1129,9 +1130,66 @@ public class TestWebHDFS {
cluster.shutdown();
}
}
}
@Test
public void testSetQuota() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
final Path path = new Path("/TestDir");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
conf, WebHdfsConstants.WEBHDFS_SCHEME);
final DistributedFileSystem dfs = cluster.getFileSystem();
final long nsQuota = 100;
final long spaceQuota = 1024;
webHdfs.mkdirs(path);
webHdfs.setQuota(path, nsQuota, spaceQuota);
QuotaUsage quotaUsage = dfs.getQuotaUsage(path);
assertEquals(nsQuota, quotaUsage.getQuota());
assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
webHdfs.setQuota(path,
HdfsConstants.QUOTA_RESET, HdfsConstants.QUOTA_RESET);
quotaUsage = dfs.getQuotaUsage(path);
assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getQuota());
assertEquals(HdfsConstants.QUOTA_RESET, quotaUsage.getSpaceQuota());
webHdfs.setQuotaByStorageType(path, StorageType.DISK, spaceQuota);
webHdfs.setQuotaByStorageType(path, StorageType.ARCHIVE, spaceQuota);
webHdfs.setQuotaByStorageType(path, StorageType.SSD, spaceQuota);
quotaUsage = dfs.getQuotaUsage(path);
assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.DISK));
assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.ARCHIVE));
assertEquals(spaceQuota, quotaUsage.getTypeQuota(StorageType.SSD));
// Test invalid parameters
LambdaTestUtils.intercept(IllegalArgumentException.class,
() -> webHdfs.setQuota(path, -100, 100));
LambdaTestUtils.intercept(IllegalArgumentException.class,
() -> webHdfs.setQuota(path, 100, -100));
LambdaTestUtils.intercept(IllegalArgumentException.class,
() -> webHdfs.setQuotaByStorageType(path, StorageType.SSD, -100));
LambdaTestUtils.intercept(IllegalArgumentException.class,
() -> webHdfs.setQuotaByStorageType(path, null, 100));
LambdaTestUtils.intercept(IllegalArgumentException.class,
() -> webHdfs.setQuotaByStorageType(path, StorageType.RAM_DISK, 100));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testWebHdfsPread() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();

View File

@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.web.resources;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
@ -25,6 +26,7 @@ import java.util.Arrays;
import java.util.EnumSet;
import java.util.List;
import org.apache.hadoop.fs.StorageType;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@ -505,6 +507,33 @@ public class TestParam {
Assert.assertEquals("COLD", p.getValue());
}
@Test
public void testNamespaceQuotaParam() {
NameSpaceQuotaParam p =
new NameSpaceQuotaParam(NameSpaceQuotaParam.DEFAULT);
assertEquals(Long.valueOf(NameSpaceQuotaParam.DEFAULT), p.getValue());
p = new NameSpaceQuotaParam(100L);
assertEquals(100L, p.getValue().longValue());
}
@Test
public void testStorageSpaceQuotaParam() {
StorageSpaceQuotaParam sp = new StorageSpaceQuotaParam(
StorageSpaceQuotaParam.DEFAULT);
assertEquals(Long.valueOf(StorageSpaceQuotaParam.DEFAULT),
sp.getValue());
sp = new StorageSpaceQuotaParam(100L);
assertEquals(100L, sp.getValue().longValue());
}
@Test
public void testStorageTypeParam() {
StorageTypeParam p = new StorageTypeParam(StorageTypeParam.DEFAULT);
assertNull(p.getValue());
p = new StorageTypeParam(StorageType.DISK.name());
assertEquals(StorageType.DISK.name(), p.getValue());
}
@Test
public void testECPolicyParam() {
ECPolicyParam p = new ECPolicyParam(ECPolicyParam.DEFAULT);