HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types. Contributed by Surendra Singh Lilhore.

(cherry picked from commit 41d3f8899d)
Author: Xiaoyu Yao, 2015-11-09 09:57:56 -08:00
Parent: aaa38581f5
Commit: 48a494cdc3
5 changed files with 142 additions and 8 deletions

JsonUtilClient.java (org.apache.hadoop.hdfs.web)

@@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs.web;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -316,14 +318,22 @@ class JsonUtilClient {
     final long quota = ((Number) m.get("quota")).longValue();
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
+    final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
 
-    return new ContentSummary.Builder()
-        .length(length)
-        .fileCount(fileCount)
-        .directoryCount(directoryCount)
-        .quota(quota)
-        .spaceConsumed(spaceConsumed)
-        .spaceQuota(spaceQuota).build();
+    Builder contentSummaryBuilder = new ContentSummary.Builder().length(length)
+        .fileCount(fileCount).directoryCount(directoryCount).quota(quota)
+        .spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+    if (typem != null) {
+      for (StorageType t : StorageType.getTypesSupportingQuota()) {
+        Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
+        if (type != null) {
+          contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
+              ((Number) type.get("quota")).longValue()).typeConsumed(t,
+              ((Number) type.get("consumed")).longValue());
+        }
+      }
+    }
+    return contentSummaryBuilder.build();
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
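With this change in place, a WebHDFS client sees per-storage-type quotas
through the standard ContentSummary accessors. A minimal usage sketch, not
part of this patch; the filesystem URI and path are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StorageType;

    public class TypeQuotaExample {
      public static void main(String[] args) throws Exception {
        // "webhdfs://namenode:50070" is a placeholder NameNode HTTP address.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode:50070"), new Configuration());
        ContentSummary summary = fs.getContentSummary(new Path("/QuotaDir"));
        for (StorageType t : StorageType.getTypesSupportingQuota()) {
          // A value of -1 means no quota is set for that storage type.
          System.out.println(t + ": quota=" + summary.getTypeQuota(t)
              + ", consumed=" + summary.getTypeConsumed(t));
        }
      }
    }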

CHANGES.txt (hadoop-hdfs)

@@ -1438,6 +1438,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9364. Unnecessary DNS resolution attempts when creating NameNodeProxies.
     (Xiao Chen via zhz)
 
+    HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types.
+    (Surendra Singh Lilhore via xyao)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

JsonUtil.java (org.apache.hadoop.hdfs.web)

@@ -253,6 +253,21 @@ public class JsonUtil {
     m.put("quota", contentsummary.getQuota());
     m.put("spaceConsumed", contentsummary.getSpaceConsumed());
     m.put("spaceQuota", contentsummary.getSpaceQuota());
+    final Map<String, Map<String, Long>> typeQuota =
+        new TreeMap<String, Map<String, Long>>();
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      long tQuota = contentsummary.getTypeQuota(t);
+      if (tQuota != HdfsConstants.QUOTA_RESET) {
+        Map<String, Long> type = typeQuota.get(t.toString());
+        if (type == null) {
+          type = new TreeMap<String, Long>();
+          typeQuota.put(t.toString(), type);
+        }
+        type.put("quota", contentsummary.getTypeQuota(t));
+        type.put("consumed", contentsummary.getTypeConsumed(t));
+      }
+    }
+    m.put("typeQuota", typeQuota);
     return toJsonString(ContentSummary.class, m);
   }
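For illustration, a hedged sketch of the JSON this serializer produces when
only a DISK quota is set; the values are made up, and the builder setters are
the same ones the client-side change above uses:

    ContentSummary cs = new ContentSummary.Builder()
        .length(24930).fileCount(2).directoryCount(2)
        .quota(-1).spaceConsumed(24930).spaceQuota(-1)
        .typeQuota(StorageType.DISK, 100000)
        .typeConsumed(StorageType.DISK, 1024)
        .build();
    // Assuming the method above is the ContentSummary overload of toJsonString:
    String json = JsonUtil.toJsonString(cs);
    // json contains: ..., "typeQuota":{"DISK":{"consumed":1024,"quota":100000}}
    // ARCHIVE and SSD are omitted because their quota is still
    // HdfsConstants.QUOTA_RESET (-1).

Because typeQuota is a TreeMap keyed by storage-type name, the per-type
entries always serialize in a stable, sorted order.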

WebHDFS.md (WebHDFS REST API documentation)

@@ -536,7 +536,25 @@ Other File System Operations
     "length" : 24930,
     "quota" : -1,
     "spaceConsumed" : 24930,
-    "spaceQuota" : -1
+    "spaceQuota" : -1,
+    "typeQuota":
+    {
+      "ARCHIVE":
+      {
+        "consumed": 500,
+        "quota": 10000
+      },
+      "DISK":
+      {
+        "consumed": 500,
+        "quota": 10000
+      },
+      "SSD":
+      {
+        "consumed": 500,
+        "quota": 10000
+      }
+    }
   }
 }
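For reference, the response above comes from the existing content summary
operation; the request itself is unchanged by this patch (host, port, and
path are placeholders):

    curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETCONTENTSUMMARY"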
@@ -1261,6 +1279,70 @@ See also: [`MKDIRS`](#Make_a_Directory), [`RENAME`](#Rename_a_FileDirectory), [`
       "description": "The disk space quota.",
       "type" : "integer",
       "required" : true
+    },
+    "typeQuota":
+    {
+      "type" : "object",
+      "properties":
+      {
+        "ARCHIVE":
+        {
+          "type" : "object",
+          "properties":
+          {
+            "consumed":
+            {
+              "description": "The storage type space consumed.",
+              "type" : "integer",
+              "required" : true
+            },
+            "quota":
+            {
+              "description": "The storage type quota.",
+              "type" : "integer",
+              "required" : true
+            }
+          }
+        },
+        "DISK":
+        {
+          "type" : "object",
+          "properties":
+          {
+            "consumed":
+            {
+              "description": "The storage type space consumed.",
+              "type" : "integer",
+              "required" : true
+            },
+            "quota":
+            {
+              "description": "The storage type quota.",
+              "type" : "integer",
+              "required" : true
+            }
+          }
+        },
+        "SSD":
+        {
+          "type" : "object",
+          "properties":
+          {
+            "consumed":
+            {
+              "description": "The storage type space consumed.",
+              "type" : "integer",
+              "required" : true
+            },
+            "quota":
+            {
+              "description": "The storage type quota.",
+              "type" : "integer",
+              "required" : true
+            }
+          }
+        }
+      }
+    }
   }
 }

TestWebHDFS.java (org.apache.hadoop.hdfs.web)

@@ -38,10 +38,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -599,6 +601,28 @@ public class TestWebHDFS {
     }
   }
 
+  @Test
+  public void testContentSummary() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/QuotaDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      dfs.mkdirs(path);
+      dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
+      ContentSummary contentSummary = webHdfs.getContentSummary(path);
+      Assert.assertTrue((contentSummary.getTypeQuota(
+          StorageType.DISK) == 100000));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
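A note on the new test: it runs with numDataNodes(0) because storage-type
quotas are namespace metadata kept entirely on the NameNode, so no DataNodes
are needed to set or read them. A hypothetical follow-up assertion, not in
the patch, could also confirm that the empty directory reports no usage:

    // Hypothetical extra check: nothing has been written under /QuotaDir,
    // so the DISK consumption reported over WebHDFS should be zero.
    Assert.assertEquals(0, contentSummary.getTypeConsumed(StorageType.DISK));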