HDFS-14034. Support getQuotaUsage API in WebHDFS. Contributed by Chao Sun.

Signed-off-by: Wei-Chiu Chuang <weichiu@apache.org>

(cherry picked from 3ae775d740)
Chao Sun 2019-07-30 15:59:57 -07:00 committed by Erik Krogen
parent e47c483d9f
commit d38b617baa
12 changed files with 513 additions and 54 deletions


@@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.MD5MD5CRC32CastagnoliFileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.permission.AclEntry;
@@ -426,30 +427,66 @@ public class JsonUtilClient
      return null;
    }
    final Map<?, ?> m = (Map<?, ?>)
        json.get(ContentSummary.class.getSimpleName());
    final long length = ((Number) m.get("length")).longValue();
    final long fileCount = ((Number) m.get("fileCount")).longValue();
    final long directoryCount = ((Number) m.get("directoryCount")).longValue();
    ContentSummary.Builder builder = new ContentSummary.Builder()
        .length(length)
        .fileCount(fileCount)
        .directoryCount(directoryCount);
    builder = buildQuotaUsage(builder, m, ContentSummary.Builder.class);
    return builder.build();
  }

  /** Convert a JSON map to a QuotaUsage. */
  static QuotaUsage toQuotaUsage(final Map<?, ?> json) {
    if (json == null) {
      return null;
    }
    final Map<?, ?> m = (Map<?, ?>) json.get(QuotaUsage.class.getSimpleName());
    QuotaUsage.Builder builder = new QuotaUsage.Builder();
    builder = buildQuotaUsage(builder, m, QuotaUsage.Builder.class);
    return builder.build();
  }

  /**
   * Given a builder for QuotaUsage, parse the provided map and
   * construct the relevant fields. Return the updated builder.
   */
  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
      T builder, Map<?, ?> m, Class<T> type) {
    final long quota = ((Number) m.get("quota")).longValue();
    final long spaceConsumed = ((Number) m.get("spaceConsumed")).longValue();
    final long spaceQuota = ((Number) m.get("spaceQuota")).longValue();
    final Map<?, ?> typem = (Map<?, ?>) m.get("typeQuota");
    T result = type.cast(builder
        .quota(quota)
        .spaceConsumed(spaceConsumed)
        .spaceQuota(spaceQuota));
    // ContentSummary doesn't set this so check before using it
    if (m.get("fileAndDirectoryCount") != null) {
      final long fileAndDirectoryCount =
          ((Number) m.get("fileAndDirectoryCount")).longValue();
      result = type.cast(result.fileAndDirectoryCount(fileAndDirectoryCount));
    }
    if (typem != null) {
      for (StorageType t : StorageType.getTypesSupportingQuota()) {
        Map<?, ?> typeQuota = (Map<?, ?>) typem.get(t.toString());
        if (typeQuota != null) {
          result = type.cast(result.typeQuota(t,
              ((Number) typeQuota.get("quota")).longValue()).typeConsumed(t,
              ((Number) typeQuota.get("consumed")).longValue()));
        }
      }
    }
    return result;
  }

  /** Convert a Json map to a MD5MD5CRC32FileChecksum. */


@@ -75,6 +75,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.GlobalStorageStatistics;
import org.apache.hadoop.fs.GlobalStorageStatistics.StorageStatisticsProvider;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageStatistics;
import org.apache.hadoop.fs.permission.FsCreateModes;
import org.apache.hadoop.hdfs.DFSOpsCountStatistics;
@@ -1884,6 +1885,20 @@ public class WebHdfsFileSystem extends FileSystem
    }.run();
  }

  @Override
  public QuotaUsage getQuotaUsage(final Path p) throws IOException {
    statistics.incrementReadOps(1);
    storageStatistics.incrementOpCounter(OpType.GET_QUOTA_USAGE);
    final HttpOpParam.Op op = GetOpParam.Op.GETQUOTAUSAGE;
    return new FsPathResponseRunner<QuotaUsage>(op, p) {
      @Override
      QuotaUsage decodeResponse(Map<?, ?> json) {
        return JsonUtilClient.toQuotaUsage(json);
      }
    }.run();
  }

  @Override
  public MD5MD5CRC32FileChecksum getFileChecksum(final Path p
      ) throws IOException {


@@ -28,6 +28,7 @@ public class GetOpParam extends HttpOpParam<GetOpParam.Op> {
    GETFILESTATUS(false, HttpURLConnection.HTTP_OK),
    LISTSTATUS(false, HttpURLConnection.HTTP_OK),
    GETCONTENTSUMMARY(false, HttpURLConnection.HTTP_OK),
    GETQUOTAUSAGE(false, HttpURLConnection.HTTP_OK),
    GETFILECHECKSUM(true, HttpURLConnection.HTTP_OK),
    GETHOMEDIRECTORY(false, HttpURLConnection.HTTP_OK),


@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PositionedReadable;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.Seekable;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttrCodec;
@@ -191,9 +192,16 @@ public class HttpFSFileSystem extends FileSystem
  public static final String CONTENT_SUMMARY_DIRECTORY_COUNT_JSON = "directoryCount";
  public static final String CONTENT_SUMMARY_FILE_COUNT_JSON = "fileCount";
  public static final String CONTENT_SUMMARY_LENGTH_JSON = "length";
  public static final String QUOTA_USAGE_JSON = "QuotaUsage";
  public static final String QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON =
      "fileAndDirectoryCount";
  public static final String QUOTA_USAGE_QUOTA_JSON = "quota";
  public static final String QUOTA_USAGE_SPACE_CONSUMED_JSON = "spaceConsumed";
  public static final String QUOTA_USAGE_SPACE_QUOTA_JSON = "spaceQuota";
  public static final String QUOTA_USAGE_CONSUMED_JSON = "consumed";
  public static final String QUOTA_USAGE_TYPE_QUOTA_JSON = "typeQuota";
  public static final String ACL_STATUS_JSON = "AclStatus";
  public static final String ACL_STICKY_BIT_JSON = "stickyBit";
@@ -222,8 +230,9 @@ public class HttpFSFileSystem extends FileSystem
  public enum Operation {
    OPEN(HTTP_GET), GETFILESTATUS(HTTP_GET), LISTSTATUS(HTTP_GET),
    GETHOMEDIRECTORY(HTTP_GET), GETCONTENTSUMMARY(HTTP_GET),
    GETQUOTAUSAGE(HTTP_GET), GETFILECHECKSUM(HTTP_GET),
    GETFILEBLOCKLOCATIONS(HTTP_GET), INSTRUMENTATION(HTTP_GET),
    GETACLSTATUS(HTTP_GET), GETTRASHROOT(HTTP_GET),
    APPEND(HTTP_POST), CONCAT(HTTP_POST), TRUNCATE(HTTP_POST),
    CREATE(HTTP_PUT), MKDIRS(HTTP_PUT), RENAME(HTTP_PUT), SETOWNER(HTTP_PUT),
    SETPERMISSION(HTTP_PUT), SETREPLICATION(HTTP_PUT), SETTIMES(HTTP_PUT),
@@ -1124,14 +1133,65 @@ public class HttpFSFileSystem extends FileSystem
        getConnection(Operation.GETCONTENTSUMMARY.getMethod(), params, f, true);
    HttpExceptionUtils.validateResponse(conn, HttpURLConnection.HTTP_OK);
    JSONObject json = (JSONObject) ((JSONObject)
        HttpFSUtils.jsonParse(conn)).get(CONTENT_SUMMARY_JSON);
    ContentSummary.Builder builder = new ContentSummary.Builder()
        .length((Long) json.get(CONTENT_SUMMARY_LENGTH_JSON))
        .fileCount((Long) json.get(CONTENT_SUMMARY_FILE_COUNT_JSON))
        .directoryCount((Long) json.get(CONTENT_SUMMARY_DIRECTORY_COUNT_JSON));
    builder = buildQuotaUsage(builder, json, ContentSummary.Builder.class);
    return builder.build();
  }

  @Override
  public QuotaUsage getQuotaUsage(Path f) throws IOException {
    Map<String, String> params = new HashMap<>();
    params.put(OP_PARAM, Operation.GETQUOTAUSAGE.toString());
    HttpURLConnection conn =
        getConnection(Operation.GETQUOTAUSAGE.getMethod(), params, f, true);
    JSONObject json = (JSONObject) ((JSONObject)
        HttpFSUtils.jsonParse(conn)).get(QUOTA_USAGE_JSON);
    QuotaUsage.Builder builder = new QuotaUsage.Builder();
    builder = buildQuotaUsage(builder, json, QuotaUsage.Builder.class);
    return builder.build();
  }

  /**
   * Given a builder for QuotaUsage, parse the provided JSON object and
   * construct the relevant fields. Return the updated builder.
   */
  private static <T extends QuotaUsage.Builder> T buildQuotaUsage(
      T builder, JSONObject json, Class<T> type) {
    long quota = (Long) json.get(QUOTA_USAGE_QUOTA_JSON);
    long spaceConsumed = (Long) json.get(QUOTA_USAGE_SPACE_CONSUMED_JSON);
    long spaceQuota = (Long) json.get(QUOTA_USAGE_SPACE_QUOTA_JSON);
    JSONObject typeJson = (JSONObject) json.get(QUOTA_USAGE_TYPE_QUOTA_JSON);
    builder = type.cast(builder
        .quota(quota)
        .spaceConsumed(spaceConsumed)
        .spaceQuota(spaceQuota)
    );
    // ContentSummary doesn't set this so check before using it
    if (json.get(QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON) != null) {
      long fileAndDirectoryCount = (Long)
          json.get(QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON);
      builder = type.cast(builder.fileAndDirectoryCount(fileAndDirectoryCount));
    }
    if (typeJson != null) {
      for (StorageType t : StorageType.getTypesSupportingQuota()) {
        JSONObject typeQuota = (JSONObject) typeJson.get(t.toString());
        if (typeQuota != null) {
          builder = type.cast(builder
              .typeQuota(t, ((Long) typeQuota.get(QUOTA_USAGE_QUOTA_JSON)))
              .typeConsumed(t, ((Long) typeQuota.get(QUOTA_USAGE_CONSUMED_JSON))
              ));
        }
      }
    }
    return builder;
  }

  @Override


@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.GlobFilter;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttrCodec;
import org.apache.hadoop.fs.XAttrSetFlag;

@@ -36,6 +37,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.web.JsonUtil;

@@ -56,6 +58,7 @@ import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.TreeMap;

import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTPFS_BUFFER_SIZE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.HTTP_BUFFER_SIZE_DEFAULT;
@@ -249,17 +252,66 @@ public class FSOperations {
  @SuppressWarnings({"unchecked"})
  private static Map contentSummaryToJSON(ContentSummary contentSummary) {
    Map json = new LinkedHashMap();
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_DIRECTORY_COUNT_JSON,
        contentSummary.getDirectoryCount());
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_FILE_COUNT_JSON,
        contentSummary.getFileCount());
    json.put(HttpFSFileSystem.CONTENT_SUMMARY_LENGTH_JSON,
        contentSummary.getLength());
    Map<String, Object> quotaUsageMap = quotaUsageToMap(contentSummary);
    for (Map.Entry<String, Object> e : quotaUsageMap.entrySet()) {
      // For ContentSummary we don't need this since we already have
      // separate count for file and directory.
      if (!e.getKey().equals(
          HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON)) {
        json.put(e.getKey(), e.getValue());
      }
    }
    Map response = new LinkedHashMap();
    response.put(HttpFSFileSystem.CONTENT_SUMMARY_JSON, json);
    return response;
  }

  /**
   * Converts a <code>QuotaUsage</code> object into a JSON object.
   */
  @SuppressWarnings({"unchecked"})
  private static Map quotaUsageToJSON(QuotaUsage quotaUsage) {
    Map response = new LinkedHashMap();
    Map quotaUsageMap = quotaUsageToMap(quotaUsage);
    response.put(HttpFSFileSystem.QUOTA_USAGE_JSON, quotaUsageMap);
    return response;
  }

  private static Map<String, Object> quotaUsageToMap(QuotaUsage quotaUsage) {
    Map<String, Object> result = new LinkedHashMap<>();
    result.put(HttpFSFileSystem.QUOTA_USAGE_FILE_AND_DIRECTORY_COUNT_JSON,
        quotaUsage.getFileAndDirectoryCount());
    result.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON, quotaUsage.getQuota());
    result.put(HttpFSFileSystem.QUOTA_USAGE_SPACE_CONSUMED_JSON,
        quotaUsage.getSpaceConsumed());
    result.put(HttpFSFileSystem.QUOTA_USAGE_SPACE_QUOTA_JSON,
        quotaUsage.getSpaceQuota());
    Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
    for (StorageType t : StorageType.getTypesSupportingQuota()) {
      long tQuota = quotaUsage.getTypeQuota(t);
      if (tQuota != HdfsConstants.QUOTA_RESET) {
        Map<String, Long> type = typeQuota.get(t.toString());
        if (type == null) {
          type = new TreeMap<>();
          typeQuota.put(t.toString(), type);
        }
        type.put(HttpFSFileSystem.QUOTA_USAGE_QUOTA_JSON,
            quotaUsage.getTypeQuota(t));
        type.put(HttpFSFileSystem.QUOTA_USAGE_CONSUMED_JSON,
            quotaUsage.getTypeConsumed(t));
      }
    }
    result.put(HttpFSFileSystem.QUOTA_USAGE_TYPE_QUOTA_JSON, typeQuota);
    return result;
  }
  /**
   * Converts an object into a Json Map with one key-value entry.
   * <p/>
@@ -473,6 +525,26 @@ public class FSOperations {
  }

  /**
   * Executor that performs a quota-usage FileSystemAccess files system
   * operation.
   */
  @InterfaceAudience.Private
  public static class FSQuotaUsage
      implements FileSystemAccess.FileSystemExecutor<Map> {
    private Path path;

    public FSQuotaUsage(String path) {
      this.path = new Path(path);
    }

    @Override
    public Map execute(FileSystem fs) throws IOException {
      QuotaUsage quotaUsage = fs.getQuotaUsage(path);
      return quotaUsageToJSON(quotaUsage);
    }
  }

  /**
   * Executor that performs a create FileSystemAccess files system operation.
   */


@@ -57,6 +57,7 @@ public class HttpFSParametersProvider extends ParametersProvider {
    PARAMS_DEF.put(Operation.LISTSTATUS, new Class[]{FilterParam.class});
    PARAMS_DEF.put(Operation.GETHOMEDIRECTORY, new Class[]{});
    PARAMS_DEF.put(Operation.GETCONTENTSUMMARY, new Class[]{});
    PARAMS_DEF.put(Operation.GETQUOTAUSAGE, new Class[]{});
    PARAMS_DEF.put(Operation.GETFILECHECKSUM,
        new Class[]{NoRedirectParam.class});
    PARAMS_DEF.put(Operation.GETFILEBLOCKLOCATIONS, new Class[]{});


@@ -304,6 +304,14 @@ public class HttpFSServer {
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETQUOTAUSAGE: {
      FSOperations.FSQuotaUsage command =
          new FSOperations.FSQuotaUsage(path);
      Map json = fsExecute(user, command);
      AUDIT_LOG.info("[{}]", path);
      response = Response.ok(json).type(MediaType.APPLICATION_JSON).build();
      break;
    }
    case GETFILECHECKSUM: {
      FSOperations.FSFileChecksum command =
          new FSOperations.FSFileChecksum(path);


@@ -22,12 +22,15 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockStoragePolicySpi;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.apache.hadoop.fs.http.server.HttpFSServerWebApp;
import org.apache.hadoop.fs.permission.AclEntry;
@@ -663,17 +666,56 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
    fs = getHttpFSFileSystem();
    ContentSummary httpContentSummary = fs.getContentSummary(path);
    fs.close();
    assertEquals(hdfsContentSummary.getDirectoryCount(),
        httpContentSummary.getDirectoryCount());
    assertEquals(hdfsContentSummary.getFileCount(),
        httpContentSummary.getFileCount());
    assertEquals(hdfsContentSummary.getLength(),
        httpContentSummary.getLength());
    assertEquals(hdfsContentSummary.getQuota(), httpContentSummary.getQuota());
    assertEquals(hdfsContentSummary.getSpaceConsumed(),
        httpContentSummary.getSpaceConsumed());
    assertEquals(hdfsContentSummary.getSpaceQuota(),
        httpContentSummary.getSpaceQuota());
  }

  private void testQuotaUsage() throws Exception {
    if (isLocalFS()) {
      // LocalFS doesn't support setQuota so skip here
      return;
    }
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(getProxiedFSConf());
    Path path = new Path(getProxiedFSTestDir(), "foo");
    dfs.mkdirs(path);
    dfs.setQuota(path, 20, 600 * 1024 * 1024);
    for (int i = 0; i < 10; i++) {
      dfs.createNewFile(new Path(path, "test_file_" + i));
    }
    FSDataOutputStream out = dfs.create(new Path(path, "test_file"));
    out.writeUTF("Hello World");
    out.close();

    dfs.setQuotaByStorageType(path, StorageType.SSD, 100000);
    dfs.setQuotaByStorageType(path, StorageType.DISK, 200000);

    QuotaUsage hdfsQuotaUsage = dfs.getQuotaUsage(path);
    dfs.close();

    FileSystem fs = getHttpFSFileSystem();
    QuotaUsage httpQuotaUsage = fs.getQuotaUsage(path);
    fs.close();

    assertEquals(hdfsQuotaUsage.getFileAndDirectoryCount(),
        httpQuotaUsage.getFileAndDirectoryCount());
    assertEquals(hdfsQuotaUsage.getQuota(), httpQuotaUsage.getQuota());
    assertEquals(hdfsQuotaUsage.getSpaceConsumed(),
        httpQuotaUsage.getSpaceConsumed());
    assertEquals(hdfsQuotaUsage.getSpaceQuota(),
        httpQuotaUsage.getSpaceQuota());
    assertEquals(hdfsQuotaUsage.getTypeQuota(StorageType.SSD),
        httpQuotaUsage.getTypeQuota(StorageType.SSD));
    assertEquals(hdfsQuotaUsage.getTypeQuota(StorageType.DISK),
        httpQuotaUsage.getTypeQuota(StorageType.DISK));
  }

  /** Set xattr */
/** Set xattr */ /** Set xattr */
@ -1068,9 +1110,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
protected enum Operation { protected enum Operation {
GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS, GET, OPEN, CREATE, APPEND, TRUNCATE, CONCAT, RENAME, DELETE, LIST_STATUS,
WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER, WORKING_DIRECTORY, MKDIRS, SET_TIMES, SET_PERMISSION, SET_OWNER,
SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, FILEACLS, DIRACLS, SET_XATTR, SET_REPLICATION, CHECKSUM, CONTENT_SUMMARY, QUOTA_USAGE, FILEACLS, DIRACLS,
GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION, LIST_STATUS_BATCH, SET_XATTR, GET_XATTRS, REMOVE_XATTR, LIST_XATTRS, ENCRYPTION,
GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING, LIST_STATUS_BATCH, GETTRASHROOT, STORAGEPOLICY, ERASURE_CODING,
CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT, CREATE_SNAPSHOT, RENAME_SNAPSHOT, DELETE_SNAPSHOT,
ALLOW_SNAPSHOT, DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION, ALLOW_SNAPSHOT, DISALLOW_SNAPSHOT, DISALLOW_SNAPSHOT_EXCEPTION,
FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST FILE_STATUS_ATTR, GET_SNAPSHOT_DIFF, GET_SNAPSHOTTABLE_DIRECTORY_LIST
@ -1129,6 +1171,9 @@ public abstract class BaseTestHttpFSWith extends HFSTestCase {
case CONTENT_SUMMARY: case CONTENT_SUMMARY:
testContentSummary(); testContentSummary();
break; break;
case QUOTA_USAGE:
testQuotaUsage();
break;
case FILEACLS: case FILEACLS:
testFileAcls(); testFileAcls();
break; break;


@@ -56,6 +56,7 @@ import javax.ws.rs.core.StreamingOutput;
import javax.ws.rs.core.Response.ResponseBuilder;
import javax.ws.rs.core.Response.Status;
import org.apache.hadoop.fs.QuotaUsage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;

@@ -1161,6 +1162,12 @@ public class NamenodeWebHdfsMethods {
      final String js = JsonUtil.toJsonString(contentsummary);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETQUOTAUSAGE:
    {
      final QuotaUsage quotaUsage = cp.getQuotaUsage(fullpath);
      final String js = JsonUtil.toJsonString(quotaUsage);
      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
    }
    case GETFILECHECKSUM:
    {
      final NameNode namenode = (NameNode)context.getAttribute("name.node");


@@ -24,6 +24,7 @@ import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrCodec;
@@ -353,25 +354,44 @@ public class JsonUtil {
    m.put("length", contentsummary.getLength());
    m.put("fileCount", contentsummary.getFileCount());
    m.put("directoryCount", contentsummary.getDirectoryCount());
    // For ContentSummary we don't need this since we already have
    // separate count for file and directory.
    m.putAll(toJsonMap(contentsummary, false));
    return toJsonString(ContentSummary.class, m);
  }

  /** Convert a QuotaUsage to a JSON string. */
  public static String toJsonString(final QuotaUsage quotaUsage) {
    if (quotaUsage == null) {
      return null;
    }
    return toJsonString(QuotaUsage.class, toJsonMap(quotaUsage, true));
  }

  private static Map<String, Object> toJsonMap(
      final QuotaUsage quotaUsage, boolean includeFileAndDirectoryCount) {
    final Map<String, Object> m = new TreeMap<>();
    if (includeFileAndDirectoryCount) {
      m.put("fileAndDirectoryCount", quotaUsage.getFileAndDirectoryCount());
    }
    m.put("quota", quotaUsage.getQuota());
    m.put("spaceConsumed", quotaUsage.getSpaceConsumed());
    m.put("spaceQuota", quotaUsage.getSpaceQuota());
    final Map<String, Map<String, Long>> typeQuota = new TreeMap<>();
    for (StorageType t : StorageType.getTypesSupportingQuota()) {
      long tQuota = quotaUsage.getTypeQuota(t);
      if (tQuota != HdfsConstants.QUOTA_RESET) {
        Map<String, Long> type = typeQuota.get(t.toString());
        if (type == null) {
          type = new TreeMap<>();
          typeQuota.put(t.toString(), type);
        }
        type.put("quota", quotaUsage.getTypeQuota(t));
        type.put("consumed", quotaUsage.getTypeConsumed(t));
      }
    }
    m.put("typeQuota", typeQuota);
    return m;
  }

  /** Convert a MD5MD5CRC32FileChecksum to a Json string. */


@@ -39,6 +39,7 @@ The HTTP REST API supports the complete [FileSystem](../../api/org/apache/hadoop
* [`LISTSTATUS`](#List_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatus)
* [`LISTSTATUS_BATCH`](#Iteratively_List_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).listStatusIterator)
* [`GETCONTENTSUMMARY`](#Get_Content_Summary_of_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getContentSummary)
* [`GETQUOTAUSAGE`](#Get_Quota_Usage_of_a_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage)
* [`GETFILECHECKSUM`](#Get_File_Checksum) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getFileChecksum)
* [`GETHOMEDIRECTORY`](#Get_Home_Directory) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getHomeDirectory)
* [`GETDELEGATIONTOKEN`](#Get_Delegation_Token) (see [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getDelegationToken)

@@ -775,6 +776,48 @@ Other File System Operations

See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getContentSummary
### Get Quota Usage of a Directory

* Submit an HTTP GET request.

        curl -i "http://<HOST>:<PORT>/webhdfs/v1/<PATH>?op=GETQUOTAUSAGE"

    The client receives a response with a [`QuotaUsage` JSON object](#QuotaUsage_JSON_Schema):

        HTTP/1.1 200 OK
        Content-Type: application/json
        Transfer-Encoding: chunked

        {
          "QuotaUsage":
          {
            "fileAndDirectoryCount": 1,
            "quota" : 100,
            "spaceConsumed" : 24930,
            "spaceQuota" : 100000,
            "typeQuota":
            {
              "ARCHIVE":
              {
                "consumed": 500,
                "quota": 10000
              },
              "DISK":
              {
                "consumed": 500,
                "quota": 10000
              },
              "SSD":
              {
                "consumed": 500,
                "quota": 10000
              }
            }
          }
        }

See also: [FileSystem](../../api/org/apache/hadoop/fs/FileSystem.html).getQuotaUsage
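
The same information is available to Java applications through the standard `FileSystem` API, since `WebHdfsFileSystem` now overrides `getQuotaUsage`. The following is a minimal client sketch, not part of this patch; the host `namenode`, the port `9870`, and the path `/user/alice` are placeholders:

```java
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;

public class QuotaUsageClient {
  public static void main(String[] args) throws Exception {
    // The webhdfs:// scheme resolves to WebHdfsFileSystem, so this
    // getQuotaUsage() call is served by the GETQUOTAUSAGE REST operation.
    Configuration conf = new Configuration();
    try (FileSystem fs = FileSystem.get(
        URI.create("webhdfs://namenode:9870"), conf)) {
      QuotaUsage usage = fs.getQuotaUsage(new Path("/user/alice"));
      System.out.println("files+dirs  : " + usage.getFileAndDirectoryCount());
      System.out.println("name quota  : " + usage.getQuota());
      System.out.println("space used  : " + usage.getSpaceConsumed());
      System.out.println("space quota : " + usage.getSpaceQuota());
    }
  }
}
```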
### Get File Checksum

* Submit an HTTP GET request.

@@ -1887,6 +1930,114 @@ See also: [`MKDIRS`](#Make_a_Directory), [`RENAME`](#Rename_a_FileDirectory), [`

See also: [`GETCONTENTSUMMARY`](#Get_Content_Summary_of_a_Directory)
### QuotaUsage JSON Schema

```json
{
  "name" : "QuotaUsage",
  "properties":
  {
    "QuotaUsage":
    {
      "type" : "object",
      "properties":
      {
        "fileAndDirectoryCount":
        {
          "description": "The number of files and directories.",
          "type" : "integer",
          "required" : true
        },
        "quota":
        {
          "description": "The namespace quota of this directory.",
          "type" : "integer",
          "required" : true
        },
        "spaceConsumed":
        {
          "description": "The disk space consumed by the content.",
          "type" : "integer",
          "required" : true
        },
        "spaceQuota":
        {
          "description": "The disk space quota.",
          "type" : "integer",
          "required" : true
        },
        "typeQuota":
        {
          "type" : "object",
          "properties":
          {
            "ARCHIVE":
            {
              "type" : "object",
              "properties":
              {
                "consumed":
                {
                  "description": "The storage type space consumed.",
                  "type" : "integer",
                  "required" : true
                },
                "quota":
                {
                  "description": "The storage type quota.",
                  "type" : "integer",
                  "required" : true
                }
              }
            },
            "DISK":
            {
              "type" : "object",
              "properties":
              {
                "consumed":
                {
                  "description": "The storage type space consumed.",
                  "type" : "integer",
                  "required" : true
                },
                "quota":
                {
                  "description": "The storage type quota.",
                  "type" : "integer",
                  "required" : true
                }
              }
            },
            "SSD":
            {
              "type" : "object",
              "properties":
              {
                "consumed":
                {
                  "description": "The storage type space consumed.",
                  "type" : "integer",
                  "required" : true
                },
                "quota":
                {
                  "description": "The storage type quota.",
                  "type" : "integer",
                  "required" : true
                }
              }
            }
          }
        }
      }
    }
  }
}
```
See also: [`GETQUOTAUSAGE`](#Get_Quota_Usage_of_a_Directory)
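
The response can also be consumed without the HDFS client classes. Below is a minimal sketch using the json-simple parser (the JSON library the HttpFS changes above already use); the URL is a placeholder and error handling is omitted:

```java
import java.io.InputStreamReader;
import java.net.URL;
import java.nio.charset.StandardCharsets;

import org.json.simple.JSONObject;
import org.json.simple.parser.JSONParser;

public class RawQuotaUsage {
  public static void main(String[] args) throws Exception {
    // Issue GETQUOTAUSAGE directly and unwrap the QuotaUsage object.
    URL url = new URL(
        "http://namenode:9870/webhdfs/v1/user/alice?op=GETQUOTAUSAGE");
    JSONObject root = (JSONObject) new JSONParser().parse(
        new InputStreamReader(url.openStream(), StandardCharsets.UTF_8));
    JSONObject usage = (JSONObject) root.get("QuotaUsage");
    System.out.println("spaceConsumed = " + usage.get("spaceConsumed"));
    // Per-type entries appear under typeQuota only for storage types
    // that actually have a quota set.
    JSONObject typeQuota = (JSONObject) usage.get("typeQuota");
    JSONObject ssd = typeQuota == null
        ? null : (JSONObject) typeQuota.get("SSD");
    if (ssd != null) {
      System.out.println("SSD consumed  = " + ssd.get("consumed"));
    }
  }
}
```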
### FileChecksum JSON Schema


@@ -54,6 +54,7 @@ import java.util.Random;
import com.google.common.collect.ImmutableList;
import org.apache.commons.io.IOUtils;
import org.apache.hadoop.fs.QuotaUsage;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration;
@@ -1086,6 +1087,47 @@ public class TestWebHDFS {
    }
  }

  @Test
  public void testQuotaUsage() throws Exception {
    MiniDFSCluster cluster = null;
    final Configuration conf = WebHdfsTestUtil.createConf();
    final Path path = new Path("/TestDir");
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
          conf, WebHdfsConstants.WEBHDFS_SCHEME);
      final DistributedFileSystem dfs = cluster.getFileSystem();

      final long nsQuota = 100;
      final long spaceQuota = 600 * 1024 * 1024;
      final long diskQuota = 100000;
      final byte[] bytes = {0x0, 0x1, 0x2, 0x3};

      dfs.mkdirs(path);
      dfs.setQuota(path, nsQuota, spaceQuota);
      for (int i = 0; i < 10; i++) {
        dfs.createNewFile(new Path(path, "test_file_" + i));
      }
      FSDataOutputStream out = dfs.create(new Path(path, "test_file"));
      out.write(bytes);
      out.close();

      dfs.setQuotaByStorageType(path, StorageType.DISK, diskQuota);

      QuotaUsage quotaUsage = webHdfs.getQuotaUsage(path);
      assertEquals(12, quotaUsage.getFileAndDirectoryCount());
      assertEquals(nsQuota, quotaUsage.getQuota());
      assertEquals(bytes.length * dfs.getDefaultReplication(),
          quotaUsage.getSpaceConsumed());
      assertEquals(spaceQuota, quotaUsage.getSpaceQuota());
      assertEquals(diskQuota, quotaUsage.getTypeQuota(StorageType.DISK));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }

  @Test
  public void testWebHdfsPread() throws Exception {
    final Configuration conf = WebHdfsTestUtil.createConf();