HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types. Contributed by Surendra Singh Lilhore.

Xiaoyu Yao 2015-11-09 09:57:56 -08:00
parent ef926b2e38
commit 41d3f8899d
5 changed files with 142 additions and 8 deletions

View File

@@ -20,11 +20,13 @@
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.ContentSummary.Builder;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
@@ -316,14 +318,22 @@ static ContentSummary toContentSummary(final Map<?, ?> json) {
final long quota = ((Number) m.get("quota")).longValue();
final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
return new ContentSummary.Builder()
.length(length)
.fileCount(fileCount)
.directoryCount(directoryCount)
.quota(quota)
.spaceConsumed(spaceConsumed)
.spaceQuota(spaceQuota).build();
Builder contentSummaryBuilder = new ContentSummary.Builder().length(length)
.fileCount(fileCount).directoryCount(directoryCount).quota(quota)
.spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
if (typem != null) {
for (StorageType t : StorageType.getTypesSupportingQuota()) {
Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
if (type != null) {
contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
((Number) type.get("quota")).longValue()).typeConsumed(t,
((Number) type.get("consumed")).longValue());
}
}
}
return contentSummaryBuilder.build();
}
/** Convert a Json map to a MD5MD5CRC32FileChecksum. */

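As a quick illustration of what the parsing change above enables, here is a hypothetical client-side usage sketch (the class name and directory path are invented for the example): it fetches a ContentSummary through any FileSystem bound to the webhdfs:// scheme and prints the per-storage-type figures using the same calls this patch relies on (getTypeQuota, getTypeConsumed, StorageType.getTypesSupportingQuota).

import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;

public class TypeQuotaPrinter {
  // Prints the per-type quota and consumption for a directory.
  // A quota value of -1 means no quota is set for that storage type.
  public static void printTypeQuotas(FileSystem webHdfs, Path dir) throws Exception {
    ContentSummary summary = webHdfs.getContentSummary(dir);
    for (StorageType t : StorageType.getTypesSupportingQuota()) {
      System.out.println(t + ": quota=" + summary.getTypeQuota(t)
          + ", consumed=" + summary.getTypeConsumed(t));
    }
  }
}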
View File

@@ -198,6 +198,9 @@ Trunk (Unreleased)
HADOOP-11684. S3a to use thread pool that blocks clients.
(Thomas Demoor and Aaron Fabbri via lei)
HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types.
(Surendra Singh Lilhore via xyao)
OPTIMIZATIONS
BUG FIXES

View File

@@ -253,6 +253,21 @@ public static String toJsonString(final ContentSummary contentsummary) {
m.put("quota", contentsummary.getQuota());
m.put("spaceConsumed", contentsummary.getSpaceConsumed());
m.put("spaceQuota", contentsummary.getSpaceQuota());
final Map<String, Map<String, Long>> typeQuota =
new TreeMap<String, Map<String, Long>>();
for (StorageType t : StorageType.getTypesSupportingQuota()) {
long tQuota = contentsummary.getTypeQuota(t);
if (tQuota != HdfsConstants.QUOTA_RESET) {
Map<String, Long> type = typeQuota.get(t.toString());
if (type == null) {
type = new TreeMap<String, Long>();
typeQuota.put(t.toString(), type);
}
type.put("quota", contentsummary.getTypeQuota(t));
type.put("consumed", contentsummary.getTypeConsumed(t));
}
}
m.put("typeQuota", typeQuota);
return toJsonString(ContentSummary.class, m);
}

View File

@@ -535,7 +535,25 @@ Other File System Operations
"length" : 24930,
"quota" : -1,
"spaceConsumed" : 24930,
"spaceQuota" : -1
"spaceQuota" : -1,
"typeQuota":
{
"ARCHIVE":
{
"consumed": 500,
"quota": 10000
},
"DISK":
{
"consumed": 500,
"quota": 10000
},
"SSD":
{
"consumed": 500,
"quota": 10000
}
}
}
}
@@ -1260,6 +1278,70 @@ See also: [`MKDIRS`](#Make_a_Directory), [`RENAME`](#Rename_a_FileDirectory), [`
"description": "The disk space quota.",
"type" : "integer",
"required" : true
},
"typeQuota":
{
"type" : "object",
"properties":
{
"ARCHIVE":
{
"type" : "object",
"properties":
{
"consumed":
{
"description": "The storage type space consumed.",
"type" : "integer",
"required" : true
},
"quota":
{
"description": "The storage type quota.",
"type" : "integer",
"required" : true
}
}
},
"DISK":
{
"type" : "object",
"properties":
{
"consumed":
{
"description": "The storage type space consumed.",
"type" : "integer",
"required" : true
},
"quota":
{
"description": "The storage type quota.",
"type" : "integer",
"required" : true
}
}
},
"SSD":
{
"type" : "object",
"properties":
{
"consumed":
{
"description": "The storage type space consumed.",
"type" : "integer",
"required" : true
},
"quota":
{
"description": "The storage type quota.",
"type" : "integer",
"required" : true
}
}
}
}
}
}
}
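For reference, the new fields can also be inspected without any HDFS client classes by calling the GETCONTENTSUMMARY operation documented above directly over HTTP. The sketch below is only an illustration: the host, port, and directory path are placeholders, and the raw JSON response (including the new typeQuota object) is simply printed rather than parsed.

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class RawContentSummary {
  public static void main(String[] args) throws Exception {
    // Placeholder NameNode HTTP address and path; adjust for your cluster.
    URL url = new URL(
        "http://namenode.example.com:50070/webhdfs/v1/QuotaDir?op=GETCONTENTSUMMARY");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    try (BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // response body contains the "typeQuota" object
      }
    } finally {
      conn.disconnect();
    }
  }
}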

View File

@@ -39,10 +39,12 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -642,6 +644,28 @@ public void testWebHdfsOffsetAndLength() throws Exception{
}
}
@Test
public void testContentSummary() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
final Path path = new Path("/QuotaDir");
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
conf, WebHdfsConstants.WEBHDFS_SCHEME);
final DistributedFileSystem dfs = cluster.getFileSystem();
dfs.mkdirs(path);
dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
ContentSummary contentSummary = webHdfs.getContentSummary(path);
Assert.assertTrue((contentSummary.getTypeQuota(
StorageType.DISK) == 100000));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
@Test
public void testWebHdfsPread() throws Exception {
final Configuration conf = WebHdfsTestUtil.createConf();