HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types. Contributed by Surendra Singh Lilhore.
parent ef926b2e38
commit 41d3f8899d
JsonUtilClient.java:

@@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs.web;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.ContentSummary.Builder;
 import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.XAttrCodec;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
@@ -316,14 +318,22 @@ class JsonUtilClient {
     final long quota = ((Number) m.get("quota")).longValue();
     final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
     final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
+    final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
 
-    return new ContentSummary.Builder()
-        .length(length)
-        .fileCount(fileCount)
-        .directoryCount(directoryCount)
-        .quota(quota)
-        .spaceConsumed(spaceConsumed)
-        .spaceQuota(spaceQuota).build();
+    Builder contentSummaryBuilder = new ContentSummary.Builder().length(length)
+        .fileCount(fileCount).directoryCount(directoryCount).quota(quota)
+        .spaceConsumed(spaceConsumed).spaceQuota(spaceQuota);
+    if (typem != null) {
+      for (StorageType t : StorageType.getTypesSupportingQuota()) {
+        Map<?, ?> type = (Map<?, ?>) typem.get(t.toString());
+        if (type != null) {
+          contentSummaryBuilder = contentSummaryBuilder.typeQuota(t,
+              ((Number) type.get("quota")).longValue()).typeConsumed(t,
+              ((Number) type.get("consumed")).longValue());
+        }
+      }
+    }
+    return contentSummaryBuilder.build();
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
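Once the typeQuota map is decoded as above, the per-type values surface through the existing public ContentSummary accessors. A minimal client-side sketch (not part of the patch; the webhdfs URI and directory name are placeholders):

    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.ContentSummary;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.StorageType;

    public class TypeQuotaClient {
      public static void main(String[] args) throws Exception {
        // Placeholder endpoint; point this at your namenode's HTTP address.
        FileSystem fs = FileSystem.get(
            URI.create("webhdfs://namenode-host:50070"), new Configuration());
        ContentSummary cs = fs.getContentSummary(new Path("/QuotaDir"));
        for (StorageType t : StorageType.getTypesSupportingQuota()) {
          // Types with no quota set report -1 (HdfsConstants.QUOTA_RESET).
          System.out.println(t + ": quota=" + cs.getTypeQuota(t)
              + ", consumed=" + cs.getTypeConsumed(t));
        }
      }
    }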
CHANGES.txt:

@@ -198,6 +198,9 @@ Trunk (Unreleased)
     HADOOP-11684. S3a to use thread pool that blocks clients.
     (Thomas Demoor and Aaron Fabbri via lei)
 
+    HDFS-9234. WebHdfs: getContentSummary() should give quota for storage types.
+    (Surendra Singh Lilhore via xyao)
+
   OPTIMIZATIONS
 
   BUG FIXES
JsonUtil.java:

@@ -253,6 +253,21 @@ public class JsonUtil {
     m.put("quota", contentsummary.getQuota());
     m.put("spaceConsumed", contentsummary.getSpaceConsumed());
     m.put("spaceQuota", contentsummary.getSpaceQuota());
+    final Map<String, Map<String, Long>> typeQuota =
+        new TreeMap<String, Map<String, Long>>();
+    for (StorageType t : StorageType.getTypesSupportingQuota()) {
+      long tQuota = contentsummary.getTypeQuota(t);
+      if (tQuota != HdfsConstants.QUOTA_RESET) {
+        Map<String, Long> type = typeQuota.get(t.toString());
+        if (type == null) {
+          type = new TreeMap<String, Long>();
+          typeQuota.put(t.toString(), type);
+        }
+        type.put("quota", contentsummary.getTypeQuota(t));
+        type.put("consumed", contentsummary.getTypeConsumed(t));
+      }
+    }
+    m.put("typeQuota", typeQuota);
     return toJsonString(ContentSummary.class, m);
   }
 
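Two server-side details worth noting: the TreeMap keeps the storage-type keys in a deterministic order in the emitted JSON, and any type whose quota is unset (HdfsConstants.QUOTA_RESET) is omitted from the map entirely. For a directory carrying only a DISK quota, the serialized fragment would look like this (values illustrative):

    "typeQuota":
    {
      "DISK":
      {
        "consumed": 2048,
        "quota": 100000
      }
    }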
WebHDFS.md:

@@ -535,7 +535,25 @@ Other File System Operations
         "length" : 24930,
         "quota" : -1,
         "spaceConsumed" : 24930,
-        "spaceQuota" : -1
+        "spaceQuota" : -1,
+        "typeQuota":
+        {
+          "ARCHIVE":
+          {
+            "consumed": 500,
+            "quota": 10000
+          },
+          "DISK":
+          {
+            "consumed": 500,
+            "quota": 10000
+          },
+          "SSD":
+          {
+            "consumed": 500,
+            "quota": 10000
+          }
+        }
       }
     }
   }
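The request side of the call is unchanged; the extended response above comes back from the usual content-summary operation that WebHDFS.md already documents:

    curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETCONTENTSUMMARY"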
WebHDFS.md (JSON schema):

@@ -1260,6 +1278,70 @@ See also: [`MKDIRS`](#Make_a_Directory), [`RENAME`](#Rename_a_FileDirectory), [`
           "description": "The disk space quota.",
           "type"       : "integer",
           "required"   : true
+        },
+        "typeQuota":
+        {
+          "type"      : "object",
+          "properties":
+          {
+            "ARCHIVE":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            },
+            "DISK":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            },
+            "SSD":
+            {
+              "type"      : "object",
+              "properties":
+              {
+                "consumed":
+                {
+                  "description": "The storage type space consumed.",
+                  "type"       : "integer",
+                  "required"   : true
+                },
+                "quota":
+                {
+                  "description": "The storage type quota.",
+                  "type"       : "integer",
+                  "required"   : true
+                }
+              }
+            }
+          }
+        }
         }
       }
     }
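For consumers that bypass the Hadoop client entirely, the new fields can be read straight off the wire. A sketch using Jackson (an assumed dependency; any JSON parser works), with a placeholder namenode address; note that the summary is wrapped in a top-level "ContentSummary" object, as the schema above describes:

    import java.net.URL;

    import com.fasterxml.jackson.databind.JsonNode;
    import com.fasterxml.jackson.databind.ObjectMapper;

    public class RawTypeQuota {
      public static void main(String[] args) throws Exception {
        // Placeholder host/port; GETCONTENTSUMMARY is served by the namenode.
        JsonNode root = new ObjectMapper().readTree(new URL(
            "http://namenode-host:50070/webhdfs/v1/QuotaDir?op=GETCONTENTSUMMARY"));
        // "typeQuota" lists only the storage types that have a quota set.
        JsonNode typeQuota = root.path("ContentSummary").path("typeQuota");
        typeQuota.fields().forEachRemaining(e ->
            System.out.println(e.getKey() + " -> " + e.getValue()));
      }
    }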
TestWebHDFS.java:

@@ -39,10 +39,12 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -642,6 +644,28 @@ public class TestWebHDFS {
     }
   }
 
+  @Test
+  public void testContentSummary() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    final Path path = new Path("/QuotaDir");
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+          conf, WebHdfsConstants.WEBHDFS_SCHEME);
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      dfs.mkdirs(path);
+      dfs.setQuotaByStorageType(path, StorageType.DISK, 100000);
+      ContentSummary contentSummary = webHdfs.getContentSummary(path);
+      Assert.assertTrue((contentSummary.getTypeQuota(
+          StorageType.DISK) == 100000));
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   @Test
   public void testWebHdfsPread() throws Exception {
     final Configuration conf = WebHdfsTestUtil.createConf();
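As a usage note, the per-type quota this test sets programmatically via setQuotaByStorageType is what an operator would normally apply from the CLI, along the lines of (directory name illustrative):

    hdfs dfsadmin -setSpaceQuota 100000 -storageType DISK /QuotaDir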