HDFS-8898. Create API and command-line argument to get quota and quota usage without detailed content summary. Contributed by Ming Ma.

Kihwal Lee 2016-01-21 12:04:14 -06:00
parent d6258b33a7
commit 2fd19b9674
28 changed files with 1124 additions and 231 deletions
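For orientation, here is a minimal sketch of how the new API and shell option introduced by this commit are meant to be used. The configuration, path, and class name are illustrative, not part of the change:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;

public class QuotaUsageExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    // Fetch quota and usage directly, without computing a full content summary.
    QuotaUsage usage = fs.getQuotaUsage(new Path("/user/example"));
    System.out.println("name quota:     " + usage.getQuota());
    System.out.println("names used:     " + usage.getFileAndDirectoryCount());
    System.out.println("space quota:    " + usage.getSpaceQuota());
    System.out.println("space consumed: " + usage.getSpaceConsumed());
  }
}

The equivalent shell form is the new -u flag, for example: hadoop fs -count -u -h -v /user/example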


@ -30,26 +30,15 @@ import org.apache.hadoop.util.StringUtils;
/** Store the summary of a content (a directory or a file). */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class ContentSummary implements Writable{
public class ContentSummary extends QuotaUsage implements Writable{
private long length;
private long fileCount;
private long directoryCount;
private long quota;
private long spaceConsumed;
private long spaceQuota;
private long typeConsumed[];
private long typeQuota[];
public static class Builder{
/** We don't use generics. Instead override spaceConsumed and other methods
in order to keep backward compatibility. */
public static class Builder extends QuotaUsage.Builder {
public Builder() {
this.quota = -1;
this.spaceQuota = -1;
typeConsumed = new long[StorageType.values().length];
typeQuota = new long[StorageType.values().length];
for (int i = 0; i < typeQuota.length; i++) {
typeQuota[i] = -1;
}
}
public Builder length(long length) {
@ -67,58 +56,57 @@ public class ContentSummary implements Writable{
return this;
}
@Override
public Builder quota(long quota){
this.quota = quota;
super.quota(quota);
return this;
}
@Override
public Builder spaceConsumed(long spaceConsumed) {
this.spaceConsumed = spaceConsumed;
super.spaceConsumed(spaceConsumed);
return this;
}
@Override
public Builder spaceQuota(long spaceQuota) {
this.spaceQuota = spaceQuota;
super.spaceQuota(spaceQuota);
return this;
}
@Override
public Builder typeConsumed(long typeConsumed[]) {
for (int i = 0; i < typeConsumed.length; i++) {
this.typeConsumed[i] = typeConsumed[i];
}
super.typeConsumed(typeConsumed);
return this;
}
@Override
public Builder typeQuota(StorageType type, long quota) {
this.typeQuota[type.ordinal()] = quota;
super.typeQuota(type, quota);
return this;
}
@Override
public Builder typeConsumed(StorageType type, long consumed) {
this.typeConsumed[type.ordinal()] = consumed;
super.typeConsumed(type, consumed);
return this;
}
@Override
public Builder typeQuota(long typeQuota[]) {
for (int i = 0; i < typeQuota.length; i++) {
this.typeQuota[i] = typeQuota[i];
}
super.typeQuota(typeQuota);
return this;
}
public ContentSummary build() {
return new ContentSummary(length, fileCount, directoryCount, quota,
spaceConsumed, spaceQuota, typeConsumed, typeQuota);
// Set it in case applications call QuotaUsage#getFileAndDirectoryCount.
super.fileAndDirectoryCount(this.fileCount + this.directoryCount);
return new ContentSummary(this);
}
private long length;
private long fileCount;
private long directoryCount;
private long quota;
private long spaceConsumed;
private long spaceQuota;
private long typeConsumed[];
private long typeQuota[];
}
/** Constructor deprecated by ContentSummary.Builder*/
@ -143,24 +131,17 @@ public class ContentSummary implements Writable{
this.length = length;
this.fileCount = fileCount;
this.directoryCount = directoryCount;
this.quota = quota;
this.spaceConsumed = spaceConsumed;
this.spaceQuota = spaceQuota;
setQuota(quota);
setSpaceConsumed(spaceConsumed);
setSpaceQuota(spaceQuota);
}
/** Constructor for ContentSummary.Builder*/
private ContentSummary(
long length, long fileCount, long directoryCount, long quota,
long spaceConsumed, long spaceQuota, long typeConsumed[],
long typeQuota[]) {
this.length = length;
this.fileCount = fileCount;
this.directoryCount = directoryCount;
this.quota = quota;
this.spaceConsumed = spaceConsumed;
this.spaceQuota = spaceQuota;
this.typeConsumed = typeConsumed;
this.typeQuota = typeQuota;
private ContentSummary(Builder builder) {
super(builder);
this.length = builder.length;
this.fileCount = builder.fileCount;
this.directoryCount = builder.directoryCount;
}
/** @return the length */
@ -172,60 +153,15 @@ public class ContentSummary implements Writable{
/** @return the file count */
public long getFileCount() {return fileCount;}
/** Return the directory quota */
public long getQuota() {return quota;}
/** Returns storage space consumed */
public long getSpaceConsumed() {return spaceConsumed;}
/** Returns storage space quota */
public long getSpaceQuota() {return spaceQuota;}
/** Returns storage type quota */
public long getTypeQuota(StorageType type) {
return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
}
/** Returns storage type consumed*/
public long getTypeConsumed(StorageType type) {
return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
}
/** Returns true if any storage type quota has been set*/
public boolean isTypeQuotaSet() {
if (typeQuota == null) {
return false;
}
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeQuota[t.ordinal()] > 0) {
return true;
}
}
return false;
}
/** Returns true if any storage type consumption information is available*/
public boolean isTypeConsumedAvailable() {
if (typeConsumed == null) {
return false;
}
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeConsumed[t.ordinal()] > 0) {
return true;
}
}
return false;
}
@Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
out.writeLong(length);
out.writeLong(fileCount);
out.writeLong(directoryCount);
out.writeLong(quota);
out.writeLong(spaceConsumed);
out.writeLong(spaceQuota);
out.writeLong(getQuota());
out.writeLong(getSpaceConsumed());
out.writeLong(getSpaceQuota());
}
@Override
@ -234,9 +170,29 @@ public class ContentSummary implements Writable{
this.length = in.readLong();
this.fileCount = in.readLong();
this.directoryCount = in.readLong();
this.quota = in.readLong();
this.spaceConsumed = in.readLong();
this.spaceQuota = in.readLong();
setQuota(in.readLong());
setSpaceConsumed(in.readLong());
setSpaceQuota(in.readLong());
}
@Override
public boolean equals(Object to) {
if (this == to) {
return true;
} else if (to instanceof ContentSummary) {
return getLength() == ((ContentSummary) to).getLength() &&
getFileCount() == ((ContentSummary) to).getFileCount() &&
getDirectoryCount() == ((ContentSummary) to).getDirectoryCount() &&
super.equals(to);
} else {
return super.equals(to);
}
}
@Override
public int hashCode() {
long result = getLength() ^ getFileCount() ^ getDirectoryCount();
return ((int) result) ^ super.hashCode();
}
/**
@ -245,35 +201,16 @@ public class ContentSummary implements Writable{
* DIR_COUNT FILE_COUNT CONTENT_SIZE
*/
private static final String SUMMARY_FORMAT = "%12s %12s %18s ";
/**
* Output format:
* <----12----> <------15-----> <------15-----> <------15----->
* QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA
* <----12----> <----12----> <-------18------->
* DIR_COUNT FILE_COUNT CONTENT_SIZE
*/
private static final String QUOTA_SUMMARY_FORMAT = "%12s %15s ";
private static final String SPACE_QUOTA_SUMMARY_FORMAT = "%15s %15s ";
private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%13s %17s ";
private static final String[] HEADER_FIELDS = new String[] { "DIR_COUNT",
"FILE_COUNT", "CONTENT_SIZE"};
private static final String[] QUOTA_HEADER_FIELDS = new String[] { "QUOTA",
"REM_QUOTA", "SPACE_QUOTA", "REM_SPACE_QUOTA" };
private static final String[] SUMMARY_HEADER_FIELDS =
new String[] {"DIR_COUNT", "FILE_COUNT", "CONTENT_SIZE"};
/** The header string */
private static final String HEADER = String.format(
SUMMARY_FORMAT, (Object[]) HEADER_FIELDS);
private static final String SUMMARY_HEADER = String.format(
SUMMARY_FORMAT, (Object[]) SUMMARY_HEADER_FIELDS);
private static final String QUOTA_HEADER = String.format(
QUOTA_SUMMARY_FORMAT + SPACE_QUOTA_SUMMARY_FORMAT,
(Object[]) QUOTA_HEADER_FIELDS) +
HEADER;
private static final String ALL_HEADER = QUOTA_HEADER + SUMMARY_HEADER;
/** default quota display string */
private static final String QUOTA_NONE = "none";
private static final String QUOTA_INF = "inf";
/** Return the header of the output.
* if qOption is false, output directory count, file count, and content size;
@ -283,28 +220,10 @@ public class ContentSummary implements Writable{
* @return the header of the output
*/
public static String getHeader(boolean qOption) {
return qOption ? QUOTA_HEADER : HEADER;
return qOption ? ALL_HEADER : SUMMARY_HEADER;
}
/**
* return the header of with the StorageTypes
*
* @param storageTypes
* @return storage header string
*/
public static String getStorageTypeHeader(List<StorageType> storageTypes) {
StringBuffer header = new StringBuffer();
for (StorageType st : storageTypes) {
/* the field length is 13/17 for quota and remain quota
* as the max length for quota name is ARCHIVE_QUOTA
* and remain quota name REM_ARCHIVE_QUOTA */
String storageName = st.toString();
header.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT, storageName + "_QUOTA",
"REM_" + storageName + "_QUOTA"));
}
return header.toString();
}
/**
* Returns the names of the fields from the summary header.
@ -312,7 +231,7 @@ public class ContentSummary implements Writable{
* @return names of fields as displayed in the header
*/
public static String[] getHeaderFields() {
return HEADER_FIELDS;
return SUMMARY_HEADER_FIELDS;
}
/**
@ -370,41 +289,11 @@ public class ContentSummary implements Writable{
String prefix = "";
if (tOption) {
StringBuffer content = new StringBuffer();
for (StorageType st : types) {
long typeQuota = getTypeQuota(st);
long typeConsumed = getTypeConsumed(st);
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
if (typeQuota > 0) {
quotaStr = formatSize(typeQuota, hOption);
quotaRem = formatSize(typeQuota - typeConsumed, hOption);
}
content.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT,
quotaStr, quotaRem));
}
return content.toString();
return getTypesQuotaUsage(hOption, types);
}
if (qOption) {
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
String spaceQuotaStr = QUOTA_NONE;
String spaceQuotaRem = QUOTA_INF;
if (quota>0) {
quotaStr = formatSize(quota, hOption);
quotaRem = formatSize(quota-(directoryCount+fileCount), hOption);
}
if (spaceQuota>0) {
spaceQuotaStr = formatSize(spaceQuota, hOption);
spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
}
prefix = String.format(QUOTA_SUMMARY_FORMAT + SPACE_QUOTA_SUMMARY_FORMAT,
quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
prefix = getQuotaUsage(hOption);
}
return prefix + String.format(SUMMARY_FORMAT,


@ -1496,6 +1496,13 @@ public abstract class FileSystem extends Configured implements Closeable {
spaceConsumed(summary[0]).build();
}
/** Return the {@link QuotaUsage} of a given {@link Path}.
* @param f path to use
* @return the quota usage of the path
* @throws IOException if an I/O error occurs
*/
public QuotaUsage getQuotaUsage(Path f) throws IOException {
return getContentSummary(f);
}
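Because ContentSummary now extends QuotaUsage, this default keeps FileSystem implementations that do not override getQuotaUsage() working, at the cost of computing a full content summary. A small caller-side sketch; the helper itself is illustrative, and the -1 "no quota" convention follows the Builder defaults:

// Remaining namespace quota for a path, or -1 if no quota is set.
static long remainingNameQuota(FileSystem fs, Path p) throws IOException {
  QuotaUsage usage = fs.getQuotaUsage(p);
  return usage.getQuota() < 0
      ? -1
      : usage.getQuota() - usage.getFileAndDirectoryCount();
}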
final private static PathFilter DEFAULT_FILTER = new PathFilter() {
@Override
public boolean accept(Path file) {


@ -0,0 +1,359 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.util.StringUtils;
/** Store the quota usage of a directory. */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class QuotaUsage {
private long fileAndDirectoryCount;
// These stay private; the deprecated ContentSummary constructor
// updates them through the protected setters below.
private long quota;
private long spaceConsumed;
private long spaceQuota;
private long[] typeConsumed;
private long[] typeQuota;
/** Builder class for QuotaUsage. */
public static class Builder {
public Builder() {
this.quota = -1;
this.spaceQuota = -1;
typeConsumed = new long[StorageType.values().length];
typeQuota = new long[StorageType.values().length];
for (int i = 0; i < typeQuota.length; i++) {
typeQuota[i] = -1;
}
}
public Builder fileAndDirectoryCount(long count) {
this.fileAndDirectoryCount = count;
return this;
}
public Builder quota(long quota){
this.quota = quota;
return this;
}
public Builder spaceConsumed(long spaceConsumed) {
this.spaceConsumed = spaceConsumed;
return this;
}
public Builder spaceQuota(long spaceQuota) {
this.spaceQuota = spaceQuota;
return this;
}
public Builder typeConsumed(long[] typeConsumed) {
for (int i = 0; i < typeConsumed.length; i++) {
this.typeConsumed[i] = typeConsumed[i];
}
return this;
}
public Builder typeQuota(StorageType type, long quota) {
this.typeQuota[type.ordinal()] = quota;
return this;
}
public Builder typeConsumed(StorageType type, long consumed) {
this.typeConsumed[type.ordinal()] = consumed;
return this;
}
public Builder typeQuota(long[] typeQuota) {
for (int i = 0; i < typeQuota.length; i++) {
this.typeQuota[i] = typeQuota[i];
}
return this;
}
public QuotaUsage build() {
return new QuotaUsage(this);
}
private long fileAndDirectoryCount;
private long quota;
private long spaceConsumed;
private long spaceQuota;
private long[] typeConsumed;
private long[] typeQuota;
}
// Make it protected for the deprecated ContentSummary constructor.
protected QuotaUsage() { }
/** Build the instance based on the builder. */
protected QuotaUsage(Builder builder) {
this.fileAndDirectoryCount = builder.fileAndDirectoryCount;
this.quota = builder.quota;
this.spaceConsumed = builder.spaceConsumed;
this.spaceQuota = builder.spaceQuota;
this.typeConsumed = builder.typeConsumed;
this.typeQuota = builder.typeQuota;
}
protected void setQuota(long quota) {
this.quota = quota;
}
protected void setSpaceConsumed(long spaceConsumed) {
this.spaceConsumed = spaceConsumed;
}
protected void setSpaceQuota(long spaceQuota) {
this.spaceQuota = spaceQuota;
}
/** Return the file and directory count. */
public long getFileAndDirectoryCount() {
return fileAndDirectoryCount;
}
/** Return the directory quota. */
public long getQuota() {
return quota;
}
/** Return (disk) space consumed. */
public long getSpaceConsumed() {
return spaceConsumed;
}
/** Return (disk) space quota. */
public long getSpaceQuota() {
return spaceQuota;
}
/** Return storage type quota. */
public long getTypeQuota(StorageType type) {
return (typeQuota != null) ? typeQuota[type.ordinal()] : -1;
}
/** Return storage type consumed. */
public long getTypeConsumed(StorageType type) {
return (typeConsumed != null) ? typeConsumed[type.ordinal()] : 0;
}
/** Return storage type quota. */
private long[] getTypesQuota() {
return typeQuota;
}
/** Return storage type consumed. */
private long[] getTypesConsumed() {
return typeConsumed;
}
/** Return true if any storage type quota has been set. */
public boolean isTypeQuotaSet() {
if (typeQuota == null) {
return false;
}
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeQuota[t.ordinal()] > 0) {
return true;
}
}
return false;
}
/** Return true if any storage type consumption information is available. */
public boolean isTypeConsumedAvailable() {
if (typeConsumed == null) {
return false;
}
for (StorageType t : StorageType.getTypesSupportingQuota()) {
if (typeConsumed[t.ordinal()] > 0) {
return true;
}
}
return false;
}
@Override
public boolean equals(Object to) {
return (this == to || (to instanceof QuotaUsage &&
getFileAndDirectoryCount() ==
((QuotaUsage) to).getFileAndDirectoryCount() &&
getQuota() == ((QuotaUsage) to).getQuota() &&
getSpaceConsumed() == ((QuotaUsage) to).getSpaceConsumed() &&
getSpaceQuota() == ((QuotaUsage) to).getSpaceQuota() &&
Arrays.equals(getTypesQuota(), ((QuotaUsage) to).getTypesQuota()) &&
Arrays.equals(getTypesConsumed(),
((QuotaUsage) to).getTypesConsumed())));
}
@Override
public int hashCode() {
long result = (getFileAndDirectoryCount() ^ getQuota() ^
getSpaceConsumed() ^ getSpaceQuota());
if (getTypesQuota() != null) {
for (long quota : getTypesQuota()) {
result ^= quota;
}
}
if (getTypesConsumed() != null) {
for (long consumed : getTypesConsumed()) {
result ^= consumed;
}
}
return (int)result;
}
/**
* Output format:
* <----12----> <----15----> <----15----> <----15----> <-------18------->
* QUOTA REM_QUOTA SPACE_QUOTA REM_SPACE_QUOTA FILE_NAME
*/
protected static final String QUOTA_STRING_FORMAT = "%12s %15s ";
protected static final String SPACE_QUOTA_STRING_FORMAT = "%15s %15s ";
protected static final String[] QUOTA_HEADER_FIELDS = new String[] {"QUOTA",
"REM_QUOTA", "SPACE_QUOTA", "REM_SPACE_QUOTA"};
protected static final String QUOTA_HEADER = String.format(
QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT,
(Object[]) QUOTA_HEADER_FIELDS);
/**
* Output format:
* <----13----> <-------17------->
* <TYPE>_QUOTA  REM_<TYPE>_QUOTA
* (one pair of columns per storage type that supports quota)
*/
private static final String STORAGE_TYPE_SUMMARY_FORMAT = "%13s %17s ";
/** Return the header of the output.
* @return the header of the output
*/
public static String getHeader() {
return QUOTA_HEADER;
}
/** default quota display string */
private static final String QUOTA_NONE = "none";
private static final String QUOTA_INF = "inf";
@Override
public String toString() {
return toString(false);
}
public String toString(boolean hOption) {
return toString(hOption, false, null);
}
/** Return the string representation of the object in the output format.
* if hOption is false, file sizes are returned in bytes
* if hOption is true, file sizes are returned in human readable format
*
* @param hOption a flag indicating if human readable output is to be used
* @param tOption a flag indicating if storage type quota and usage are to be shown
* @param types storage types to be shown when tOption is true
* @return the string representation of the object
*/
public String toString(boolean hOption,
boolean tOption, List<StorageType> types) {
if (tOption) {
return getTypesQuotaUsage(hOption, types);
}
return getQuotaUsage(hOption);
}
protected String getQuotaUsage(boolean hOption) {
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
String spaceQuotaStr = QUOTA_NONE;
String spaceQuotaRem = QUOTA_INF;
if (quota > 0) {
quotaStr = formatSize(quota, hOption);
quotaRem = formatSize(quota-fileAndDirectoryCount, hOption);
}
if (spaceQuota > 0) {
spaceQuotaStr = formatSize(spaceQuota, hOption);
spaceQuotaRem = formatSize(spaceQuota - spaceConsumed, hOption);
}
return String.format(QUOTA_STRING_FORMAT + SPACE_QUOTA_STRING_FORMAT,
quotaStr, quotaRem, spaceQuotaStr, spaceQuotaRem);
}
protected String getTypesQuotaUsage(boolean hOption,
List<StorageType> types) {
StringBuffer content = new StringBuffer();
for (StorageType st : types) {
long typeQuota = getTypeQuota(st);
long typeConsumed = getTypeConsumed(st);
String quotaStr = QUOTA_NONE;
String quotaRem = QUOTA_INF;
if (typeQuota > 0) {
quotaStr = formatSize(typeQuota, hOption);
quotaRem = formatSize(typeQuota - typeConsumed, hOption);
}
content.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT,
quotaStr, quotaRem));
}
return content.toString();
}
/**
* Return the header for the given StorageTypes.
*
* @param storageTypes storage types to include in the header
* @return storage header string
*/
public static String getStorageTypeHeader(List<StorageType> storageTypes) {
StringBuffer header = new StringBuffer();
for (StorageType st : storageTypes) {
/* the field length is 13/17 for quota and remain quota
* as the max length for quota name is ARCHIVE_QUOTA
* and remain quota name REM_ARCHIVE_QUOTA */
String storageName = st.toString();
header.append(String.format(STORAGE_TYPE_SUMMARY_FORMAT,
storageName + "_QUOTA", "REM_" + storageName + "_QUOTA"));
}
return header.toString();
}
/**
* Formats a size to be human readable or in bytes.
* @param size value to be formatted
* @param humanReadable flag indicating human readable or not
* @return String representation of the size
*/
private String formatSize(long size, boolean humanReadable) {
return humanReadable
? StringUtils.TraditionalBinaryPrefix.long2String(size, "", 1)
: String.valueOf(size);
}
}
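A short sketch of how the Builder and the formatting helpers above fit together; the numbers are made up for illustration:

QuotaUsage usage = new QuotaUsage.Builder()
    .fileAndDirectoryCount(3)
    .quota(100)
    .spaceConsumed(128 * 1024)
    .spaceQuota(1024 * 1024)
    .build();

// Prints the four quota columns (QUOTA, REM_QUOTA, SPACE_QUOTA, REM_SPACE_QUOTA)
// followed by one row of values; toString(true) uses human readable sizes.
System.out.println(QuotaUsage.getHeader());
System.out.println(usage.toString(true));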


@ -29,6 +29,7 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
/**
@ -50,11 +51,14 @@ public class Count extends FsCommand {
private static final String OPTION_HUMAN = "h";
private static final String OPTION_HEADER = "v";
private static final String OPTION_TYPE = "t";
// Return the quota, the namespace count, and the disk space usage.
private static final String OPTION_QUOTA_AND_USAGE = "u";
public static final String NAME = "count";
public static final String USAGE =
"[-" + OPTION_QUOTA + "] [-" + OPTION_HUMAN + "] [-" + OPTION_HEADER
+ "] [-" + OPTION_TYPE + " [<storage type>]] <path> ...";
+ "] [-" + OPTION_TYPE + " [<storage type>]] [-" +
OPTION_QUOTA_AND_USAGE + "] <path> ...";
public static final String DESCRIPTION =
"Count the number of directories, files and bytes under the paths\n" +
"that match the specified file pattern. The output columns are:\n" +
@ -74,12 +78,15 @@ public class Count extends FsCommand {
OPTION_TYPE + " option, \n" +
"it displays the quota and usage for the specified types. \n" +
"Otherwise, it displays the quota and usage for all the storage \n" +
"types that support quota";
"types that support quota \n" +
"The -" + OPTION_QUOTA_AND_USAGE + " option shows the quota and \n" +
"the usage against the quota without the detailed content summary.";
private boolean showQuotas;
private boolean humanReadable;
private boolean showQuotabyType;
private List<StorageType> storageTypes = null;
private boolean showQuotasAndUsageOnly;
/** Constructor */
public Count() {}
@ -99,7 +106,7 @@ public class Count extends FsCommand {
@Override
protected void processOptions(LinkedList<String> args) {
CommandFormat cf = new CommandFormat(1, Integer.MAX_VALUE,
OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER);
OPTION_QUOTA, OPTION_HUMAN, OPTION_HEADER, OPTION_QUOTA_AND_USAGE);
cf.addOptionWithValue(OPTION_TYPE);
cf.parse(args);
if (args.isEmpty()) { // default path is the current working directory
@ -107,8 +114,9 @@ public class Count extends FsCommand {
}
showQuotas = cf.getOpt(OPTION_QUOTA);
humanReadable = cf.getOpt(OPTION_HUMAN);
showQuotasAndUsageOnly = cf.getOpt(OPTION_QUOTA_AND_USAGE);
if (showQuotas) {
if (showQuotas || showQuotasAndUsageOnly) {
String types = cf.getOptValue(OPTION_TYPE);
if (null != types) {
@ -121,9 +129,13 @@ public class Count extends FsCommand {
if (cf.getOpt(OPTION_HEADER)) {
if (showQuotabyType) {
out.println(ContentSummary.getStorageTypeHeader(storageTypes) + "PATHNAME");
out.println(QuotaUsage.getStorageTypeHeader(storageTypes) + "PATHNAME");
} else {
out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME");
if (showQuotasAndUsageOnly) {
out.println(QuotaUsage.getHeader() + "PATHNAME");
} else {
out.println(ContentSummary.getHeader(showQuotas) + "PATHNAME");
}
}
}
}
@ -145,9 +157,14 @@ public class Count extends FsCommand {
@Override
protected void processPath(PathData src) throws IOException {
ContentSummary summary = src.fs.getContentSummary(src.path);
out.println(summary.toString(showQuotas, isHumanReadable(),
showQuotabyType, storageTypes) + src);
if (showQuotasAndUsageOnly || showQuotabyType) {
QuotaUsage usage = src.fs.getQuotaUsage(src.path);
out.println(usage.toString(isHumanReadable(), showQuotabyType,
storageTypes) + src);
} else {
ContentSummary summary = src.fs.getContentSummary(src.path);
out.println(summary.toString(showQuotas, isHumanReadable()) + src);
}
}
/**


@ -44,6 +44,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.AclEntry;
import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Progressable;
@ -389,6 +390,10 @@ class ChRootedFileSystem extends FilterFileSystem {
return fs.getContentSummary(fullPath(f));
}
@Override
public QuotaUsage getQuotaUsage(Path f) throws IOException {
return fs.getQuotaUsage(fullPath(f));
}
private static Path rootPath = new Path(Path.SEPARATOR);


@ -56,6 +56,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.AclUtil;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.viewfs.InodeTree.INode;
import org.apache.hadoop.fs.viewfs.InodeTree.INodeLink;
import org.apache.hadoop.security.AccessControlException;
@ -694,6 +695,13 @@ public class ViewFileSystem extends FileSystem {
return res.targetFileSystem.getContentSummary(res.remainingPath);
}
@Override
public QuotaUsage getQuotaUsage(Path f) throws IOException {
InodeTree.ResolveResult<FileSystem> res =
fsState.resolve(getUriPath(f), true);
return res.targetFileSystem.getQuotaUsage(res.remainingPath);
}
@Override
public void setWriteChecksum(final boolean writeChecksum) {
List<InodeTree.MountPoint<FileSystem>> mountPoints =
@ -1066,5 +1074,10 @@ public class ViewFileSystem extends FileSystem {
checkPathIsSlash(path);
throw readOnlyMountTable("deleteSnapshot", path);
}
@Override
public QuotaUsage getQuotaUsage(Path f) throws IOException {
throw new NotInMountpointException(f, "getQuotaUsage");
}
}
}


@ -170,12 +170,16 @@ Similar to get command, except that the destination is restricted to a local fil
count
-----
Usage: `hadoop fs -count [-q] [-h] [-v] <paths> `
Usage: `hadoop fs -count [-q] [-h] [-v] [-t [<storage type>]] [-u] <paths> `
Count the number of directories, files and bytes under the paths that match the specified file pattern. The output columns with -count are: DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
Count the number of directories, files and bytes under the paths that match the specified file pattern. Get the quota and the usage. The output columns with -count are: DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
The output columns with -count -q are: QUOTA, REMAINING\_QUOTA, SPACE\_QUOTA, REMAINING\_SPACE\_QUOTA, DIR\_COUNT, FILE\_COUNT, CONTENT\_SIZE, PATHNAME
The output columns with -count -u are: QUOTA, REMAINING\_QUOTA, SPACE\_QUOTA, REMAINING\_SPACE\_QUOTA
The -t option shows the quota and usage for each storage type.
The -h option shows sizes in human readable format.
The -v option displays a header line.
@ -186,6 +190,9 @@ Example:
* `hadoop fs -count -q hdfs://nn1.example.com/file1`
* `hadoop fs -count -q -h hdfs://nn1.example.com/file1`
* `hadoop fs -count -q -h -v hdfs://nn1.example.com/file1`
* `hadoop fs -count -u hdfs://nn1.example.com/file1`
* `hadoop fs -count -u -h hdfs://nn1.example.com/file1`
* `hadoop fs -count -u -h -v hdfs://nn1.example.com/file1`
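For a directory with no quotas set, the -u output prints the quota columns as none/inf, followed by the path. An illustrative rendering (column widths approximate):

           QUOTA       REM_QUOTA     SPACE_QUOTA REM_SPACE_QUOTA PATHNAME
            none             inf            none             inf hdfs://nn1.example.com/file1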
Exit Code:


@ -68,7 +68,7 @@ public class CLITestHelper {
protected String clitestDataDir = null;
protected String username = null;
/**
* Read the test config file - testConfig.xml
* Read the test config file - testConf.xml
*/
protected void readTestConfigFile() {
String testConfigFile = getTestFile();


@ -131,6 +131,7 @@ public class TestFilterFileSystem {
public String getScheme();
public Path fixRelativePart(Path p);
public ContentSummary getContentSummary(Path f);
public QuotaUsage getQuotaUsage(Path f);
}
@Test


@ -112,6 +112,7 @@ public class TestHarFileSystem {
public short getReplication(Path src);
public void processDeleteOnExit();
public ContentSummary getContentSummary(Path f);
public QuotaUsage getQuotaUsage(Path f);
public FsStatus getStatus();
public FileStatus[] listStatus(Path f, PathFilter filter);
public FileStatus[] listStatus(Path[] files);


@ -0,0 +1,146 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import static org.junit.Assert.assertEquals;
import org.junit.Test;
public class TestQuotaUsage {
// check the empty constructor correctly initialises the object
@Test
public void testConstructorEmpty() {
QuotaUsage quotaUsage = new QuotaUsage.Builder().build();
assertEquals("getQuota", -1, quotaUsage.getQuota());
assertEquals("getSpaceConsumed", 0, quotaUsage.getSpaceConsumed());
assertEquals("getSpaceQuota", -1, quotaUsage.getSpaceQuota());
}
// check the full constructor with quota information
@Test
public void testConstructorWithQuota() {
long fileAndDirCount = 22222;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66666;
QuotaUsage quotaUsage = new QuotaUsage.Builder().
fileAndDirectoryCount(fileAndDirCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
assertEquals("getFileAndDirectoryCount", fileAndDirCount,
quotaUsage.getFileAndDirectoryCount());
assertEquals("getQuota", quota, quotaUsage.getQuota());
assertEquals("getSpaceConsumed", spaceConsumed,
quotaUsage.getSpaceConsumed());
assertEquals("getSpaceQuota", spaceQuota, quotaUsage.getSpaceQuota());
}
// check the constructor without quota information
@Test
public void testConstructorNoQuota() {
long spaceConsumed = 11111;
long fileAndDirCount = 22222;
QuotaUsage quotaUsage = new QuotaUsage.Builder().
fileAndDirectoryCount(fileAndDirCount).
spaceConsumed(spaceConsumed).build();
assertEquals("getFileAndDirectoryCount", fileAndDirCount,
quotaUsage.getFileAndDirectoryCount());
assertEquals("getQuota", -1, quotaUsage.getQuota());
assertEquals("getSpaceConsumed", spaceConsumed,
quotaUsage.getSpaceConsumed());
assertEquals("getSpaceQuota", -1, quotaUsage.getSpaceQuota());
}
// check the header
@Test
public void testGetHeader() {
String header = " QUOTA REM_QUOTA SPACE_QUOTA "
+ "REM_SPACE_QUOTA ";
assertEquals(header, QuotaUsage.getHeader());
}
// check the toString method with quotas
@Test
public void testToStringWithQuota() {
long fileAndDirCount = 55555;
long quota = 44444;
long spaceConsumed = 55555;
long spaceQuota = 66665;
QuotaUsage quotaUsage = new QuotaUsage.Builder().
fileAndDirectoryCount(fileAndDirCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected =" 44444 -11111 66665" +
" 11110 ";
assertEquals(expected, quotaUsage.toString());
}
// check the toString method without quotas
@Test
public void testToStringNoQuota() {
QuotaUsage quotaUsage = new QuotaUsage.Builder().
fileAndDirectoryCount(1234).build();
String expected = " none inf none"
+ " inf ";
assertEquals(expected, quotaUsage.toString());
}
// check the toString method with quotas in human readable format
@Test
public void testToStringHumanWithQuota() {
long fileAndDirCount = 222255555;
long quota = 222256578;
long spaceConsumed = 1073741825;
long spaceQuota = 1;
QuotaUsage quotaUsage = new QuotaUsage.Builder().
fileAndDirectoryCount(fileAndDirCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).build();
String expected = " 212.0 M 1023 1 "
+ " -1 G ";
assertEquals(expected, quotaUsage.toString(true));
}
// check the equality
@Test
public void testCompareQuotaUsage() {
long fileAndDirCount = 222255555;
long quota = 222256578;
long spaceConsumed = 1073741825;
long spaceQuota = 1;
long SSDspaceConsumed = 100000;
long SSDQuota = 300000;
QuotaUsage quotaUsage1 = new QuotaUsage.Builder().
fileAndDirectoryCount(fileAndDirCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).
typeConsumed(StorageType.SSD, SSDspaceConsumed).
typeQuota(StorageType.SSD, SSDQuota).
build();
QuotaUsage quotaUsage2 = new QuotaUsage.Builder().
fileAndDirectoryCount(fileAndDirCount).quota(quota).
spaceConsumed(spaceConsumed).spaceQuota(spaceQuota).
typeConsumed(StorageType.SSD, SSDspaceConsumed).
typeQuota(StorageType.SSD, SSDQuota).
build();
assertEquals(quotaUsage1, quotaUsage2);
}
}


@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FilterFileSystem;
@ -47,6 +48,7 @@ public class TestCount {
private static final String NO_QUOTAS = "Content summary without quotas";
private static final String HUMAN = "human: ";
private static final String BYTES = "bytes: ";
private static final String QUOTAS_AND_USAGE = "quotas and usage";
private static Configuration conf;
private static FileSystem mockFs;
private static FileStatus fileStat;
@ -344,7 +346,20 @@ public class TestCount {
}
@Test
public void processPathWithQuotasByMultipleStorageTypesContent() throws Exception {
public void processPathWithQuotasByMultipleStorageTypesContent()
throws Exception {
processMultipleStorageTypesContent(false);
}
@Test
public void processPathWithQuotaUsageByMultipleStorageTypesContent()
throws Exception {
processMultipleStorageTypesContent(true);
}
// "-q -t" is the same as "-u -t"; only return the storage quota and usage.
private void processMultipleStorageTypesContent(boolean quotaUsageOnly)
throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
@ -356,7 +371,7 @@ public class TestCount {
count.out = out;
LinkedList<String> options = new LinkedList<String>();
options.add("-q");
options.add(quotaUsageOnly ? "-u" : "-q");
options.add("-t");
options.add("SSD,DISK");
options.add("dummy");
@ -431,7 +446,8 @@ public class TestCount {
public void getUsage() {
Count count = new Count();
String actual = count.getUsage();
String expected = "-count [-q] [-h] [-v] [-t [<storage type>]] <path> ...";
String expected =
"-count [-q] [-h] [-v] [-t [<storage type>]] [-u] <path> ...";
assertEquals("Count.getUsage", expected, actual);
}
@ -454,11 +470,47 @@ public class TestCount {
+ "If a comma-separated list of storage types is given after the -t option, \n"
+ "it displays the quota and usage for the specified types. \n"
+ "Otherwise, it displays the quota and usage for all the storage \n"
+ "types that support quota";
+ "types that support quota \n"
+ "The -u option shows the quota and \n"
+ "the usage against the quota without the detailed content summary.";
assertEquals("Count.getDescription", expected, actual);
}
@Test
public void processPathWithQuotaUsageHuman() throws Exception {
processPathWithQuotaUsage(false);
}
@Test
public void processPathWithQuotaUsageRawBytes() throws Exception {
processPathWithQuotaUsage(true);
}
private void processPathWithQuotaUsage(boolean rawBytes) throws Exception {
Path path = new Path("mockfs:/test");
when(mockFs.getFileStatus(eq(path))).thenReturn(fileStat);
PathData pathData = new PathData(path.toString(), conf);
PrintStream out = mock(PrintStream.class);
Count count = new Count();
count.out = out;
LinkedList<String> options = new LinkedList<String>();
if (!rawBytes) {
options.add("-h");
}
options.add("-u");
options.add("dummy");
count.processOptions(options);
count.processPath(pathData);
String withStorageType = (rawBytes ? BYTES : HUMAN) + QUOTAS_AND_USAGE +
pathData.toString();
verify(out).println(withStorageType);
verifyNoMoreInteractions(out);
}
// mock content summary
static class MockContentSummary extends ContentSummary {
@ -469,19 +521,7 @@ public class TestCount {
}
@Override
public String toString(boolean qOption, boolean hOption,
boolean tOption, List<StorageType> types) {
if (tOption) {
StringBuffer result = new StringBuffer();
result.append(hOption ? HUMAN : BYTES);
for (StorageType type : types) {
result.append(type.toString());
result.append(" ");
}
return result.toString();
}
public String toString(boolean qOption, boolean hOption) {
if (qOption) {
if (hOption) {
return (HUMAN + WITH_QUOTAS);
@ -498,6 +538,36 @@ public class TestCount {
}
}
// mock quota usage
static class MockQuotaUsage extends QuotaUsage {
@SuppressWarnings("deprecation")
// suppress warning on the usage of deprecated ContentSummary constructor
public MockQuotaUsage() {
}
@Override
public String toString(boolean hOption,
boolean tOption, List<StorageType> types) {
if (tOption) {
StringBuffer result = new StringBuffer();
result.append(hOption ? HUMAN : BYTES);
for (StorageType type : types) {
result.append(type.toString());
result.append(" ");
}
return result.toString();
}
if (hOption) {
return (HUMAN + QUOTAS_AND_USAGE);
} else {
return (BYTES + QUOTAS_AND_USAGE);
}
}
}
// mock file system for use in testing
static class MockFileSystem extends FilterFileSystem {
Configuration conf;
@ -525,5 +595,10 @@ public class TestCount {
public Configuration getConf() {
return conf;
}
@Override
public QuotaUsage getQuotaUsage(Path f) throws IOException {
return new MockQuotaUsage();
}
}
}


@ -270,7 +270,7 @@
<comparators>
<comparator>
<type>RegexpComparator</type>
<expected-output>^-count \[-q\] \[-h\] \[-v\] \[-t \[&lt;storage type&gt;\]\] &lt;path&gt; \.\.\. :( )*</expected-output>
<expected-output>^-count \[-q\] \[-h\] \[-v\] \[-t \[&lt;storage type&gt;\]\] \[-u\] &lt;path&gt; \.\.\. :( )*</expected-output>
</comparator>
<comparator>
<type>RegexpComparator</type>


@ -87,6 +87,7 @@ import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
@ -163,6 +164,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RpcNoSuchMethodException;
import org.apache.hadoop.net.DNS;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
@ -2441,6 +2443,31 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
}
}
/**
* Get {@link QuotaUsage} rooted at the specified directory.
* @param src The string representation of the path
*
* @see ClientProtocol#getQuotaUsage(String)
*/
QuotaUsage getQuotaUsage(String src) throws IOException {
checkOpen();
try (TraceScope ignored = newPathTraceScope("getQuotaUsage", src)) {
return namenode.getQuotaUsage(src);
} catch(RemoteException re) {
IOException ioe = re.unwrapRemoteException(AccessControlException.class,
FileNotFoundException.class,
UnresolvedPathException.class,
RpcNoSuchMethodException.class);
if (ioe instanceof RpcNoSuchMethodException) {
LOG.debug("The version of namenode doesn't support getQuotaUsage API." +
" Fall back to use getContentSummary API.");
return getContentSummary(src);
} else {
throw ioe;
}
}
}
/**
* Sets or resets quotas for a directory.
* @see ClientProtocol#setQuota(String, long, long, StorageType)


@ -53,6 +53,7 @@ import org.apache.hadoop.fs.XAttrSetFlag;
import org.apache.hadoop.fs.Options.ChecksumOpt;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathFilter;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
@ -723,6 +724,24 @@ public class DistributedFileSystem extends FileSystem {
}.resolve(this, absF);
}
@Override
public QuotaUsage getQuotaUsage(Path f) throws IOException {
statistics.incrementReadOps(1);
Path absF = fixRelativePart(f);
return new FileSystemLinkResolver<QuotaUsage>() {
@Override
public QuotaUsage doCall(final Path p)
throws IOException, UnresolvedLinkException {
return dfs.getQuotaUsage(getPathName(p));
}
@Override
public QuotaUsage next(final FileSystem fs, final Path p)
throws IOException {
return fs.getQuotaUsage(p);
}
}.resolve(this, absF);
}
/** Set a directory's quotas
* @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String,
* long, long, StorageType)


@ -30,6 +30,7 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
@ -1514,4 +1515,17 @@ public interface ClientProtocol {
*/
@Idempotent
ErasureCodingPolicy getErasureCodingPolicy(String src) throws IOException;
/**
* Get {@link QuotaUsage} rooted at the specified directory.
* @param path The string representation of the path
*
* @throws AccessControlException permission denied
* @throws java.io.FileNotFoundException file <code>path</code> is not found
* @throws org.apache.hadoop.fs.UnresolvedLinkException if <code>path</code>
* contains a symlink.
* @throws IOException If an I/O error occurred
*/
@Idempotent
QuotaUsage getQuotaUsage(String path) throws IOException;
}


@ -34,6 +34,7 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
@ -110,6 +111,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLin
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportResponseProto;
@ -1544,4 +1546,16 @@ public class ClientNamenodeProtocolTranslatorPB implements
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public QuotaUsage getQuotaUsage(String path) throws IOException {
GetQuotaUsageRequestProto req =
GetQuotaUsageRequestProto.newBuilder().setPath(path).build();
try {
return PBHelperClient.convert(rpcProxy.getQuotaUsage(null, req)
.getUsage());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
}


@ -43,6 +43,7 @@ import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.fs.XAttrSetFlag;
@ -139,6 +140,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.HdfsFileStatusProto.File
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto.Builder;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlocksProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.QuotaUsageProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.RollingUpgradeStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.SnapshotDiffReportProto;
@ -1417,12 +1419,37 @@ public class PBHelperClient {
spaceConsumed(cs.getSpaceConsumed()).
spaceQuota(cs.getSpaceQuota());
if (cs.hasTypeQuotaInfos()) {
for (HdfsProtos.StorageTypeQuotaInfoProto info :
cs.getTypeQuotaInfos().getTypeQuotaInfoList()) {
StorageType type = convertStorageType(info.getType());
builder.typeConsumed(type, info.getConsumed());
builder.typeQuota(type, info.getQuota());
}
addStorageTypes(cs.getTypeQuotaInfos(), builder);
}
return builder.build();
}
public static QuotaUsage convert(QuotaUsageProto qu) {
if (qu == null) {
return null;
}
QuotaUsage.Builder builder = new QuotaUsage.Builder();
builder.fileAndDirectoryCount(qu.getFileAndDirectoryCount()).
quota(qu.getQuota()).
spaceConsumed(qu.getSpaceConsumed()).
spaceQuota(qu.getSpaceQuota());
if (qu.hasTypeQuotaInfos()) {
addStorageTypes(qu.getTypeQuotaInfos(), builder);
}
return builder.build();
}
public static QuotaUsageProto convert(QuotaUsage qu) {
if (qu == null) {
return null;
}
QuotaUsageProto.Builder builder = QuotaUsageProto.newBuilder();
builder.setFileAndDirectoryCount(qu.getFileAndDirectoryCount()).
setQuota(qu.getQuota()).
setSpaceConsumed(qu.getSpaceConsumed()).
setSpaceQuota(qu.getSpaceQuota());
if (qu.isTypeQuotaSet() || qu.isTypeConsumedAvailable()) {
builder.setTypeQuotaInfos(getBuilder(qu));
}
return builder.build();
}
@ -1989,22 +2016,38 @@ public class PBHelperClient {
setSpaceQuota(cs.getSpaceQuota());
if (cs.isTypeQuotaSet() || cs.isTypeConsumedAvailable()) {
HdfsProtos.StorageTypeQuotaInfosProto.Builder isb =
HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
for (StorageType t: StorageType.getTypesSupportingQuota()) {
HdfsProtos.StorageTypeQuotaInfoProto info =
HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
setType(convertStorageType(t)).
setConsumed(cs.getTypeConsumed(t)).
setQuota(cs.getTypeQuota(t)).
build();
isb.addTypeQuotaInfo(info);
}
builder.setTypeQuotaInfos(isb);
builder.setTypeQuotaInfos(getBuilder(cs));
}
return builder.build();
}
private static void addStorageTypes(
HdfsProtos.StorageTypeQuotaInfosProto typeQuotaInfos,
QuotaUsage.Builder builder) {
for (HdfsProtos.StorageTypeQuotaInfoProto info :
typeQuotaInfos.getTypeQuotaInfoList()) {
StorageType type = convertStorageType(info.getType());
builder.typeConsumed(type, info.getConsumed());
builder.typeQuota(type, info.getQuota());
}
}
private static HdfsProtos.StorageTypeQuotaInfosProto.Builder getBuilder(
QuotaUsage qu) {
HdfsProtos.StorageTypeQuotaInfosProto.Builder isb =
HdfsProtos.StorageTypeQuotaInfosProto.newBuilder();
for (StorageType t: StorageType.getTypesSupportingQuota()) {
HdfsProtos.StorageTypeQuotaInfoProto info =
HdfsProtos.StorageTypeQuotaInfoProto.newBuilder().
setType(convertStorageType(t)).
setConsumed(qu.getTypeConsumed(t)).
setQuota(qu.getTypeQuota(t)).
build();
isb.addTypeQuotaInfo(info);
}
return isb;
}
public static DatanodeStorageProto convert(DatanodeStorage s) {
return DatanodeStorageProto.newBuilder()
.setState(convertState(s.getState()))


@ -582,6 +582,14 @@ message GetContentSummaryResponseProto {
required ContentSummaryProto summary = 1;
}
message GetQuotaUsageRequestProto {
required string path = 1;
}
message GetQuotaUsageResponseProto {
required QuotaUsageProto usage = 1;
}
message SetQuotaRequestProto {
required string path = 1;
required uint64 namespaceQuota = 2;
@ -879,4 +887,6 @@ service ClientNamenodeProtocol {
returns(GetErasureCodingPoliciesResponseProto);
rpc getErasureCodingPolicy(GetErasureCodingPolicyRequestProto)
returns(GetErasureCodingPolicyResponseProto);
rpc getQuotaUsage(GetQuotaUsageRequestProto)
returns(GetQuotaUsageResponseProto);
}


@ -138,6 +138,17 @@ message ContentSummaryProto {
optional StorageTypeQuotaInfosProto typeQuotaInfos = 7;
}
/**
* Summary of quota usage of a directory
*/
message QuotaUsageProto {
required uint64 fileAndDirectoryCount = 1;
required uint64 quota = 2;
required uint64 spaceConsumed = 3;
required uint64 spaceQuota = 4;
optional StorageTypeQuotaInfosProto typeQuotaInfos = 5;
}
/**
* Storage type quota and usage information of a file or directory
*/


@ -1836,6 +1836,9 @@ Release 2.8.0 - UNRELEASED
HDFS-9655. NN should start JVM pause monitor before loading fsimage.
(John Zhuge via Lei (Eddy) Xu)
HDFS-8898. Create API and command-line argument to get quota and quota
usage without detailed content summary. (Ming Ma via kihwal)
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than


@ -28,6 +28,7 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
@ -120,6 +121,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLis
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetQuotaUsageResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetServerDefaultsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshotDiffReportRequestProto;
@ -1567,4 +1570,17 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
throw new ServiceException(e);
}
}
@Override
public GetQuotaUsageResponseProto getQuotaUsage(
RpcController controller, GetQuotaUsageRequestProto req)
throws ServiceException {
try {
QuotaUsage result = server.getQuotaUsage(req.getPath());
return GetQuotaUsageResponseProto.newBuilder()
.setUsage(PBHelperClient.convert(result)).build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
}


@ -27,6 +27,7 @@ import org.apache.hadoop.fs.FileEncryptionInfo;
import org.apache.hadoop.fs.InvalidPathException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@ -626,6 +627,58 @@ class FSDirStatAndListingOp {
}
}
static QuotaUsage getQuotaUsage(
FSDirectory fsd, String src) throws IOException {
byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src);
FSPermissionChecker pc = fsd.getPermissionChecker();
final INodesInPath iip;
fsd.readLock();
try {
src = fsd.resolvePath(pc, src, pathComponents);
iip = fsd.getINodesInPath(src, false);
if (fsd.isPermissionEnabled()) {
fsd.checkPermission(pc, iip, false, null, null, null,
FsAction.READ_EXECUTE);
}
} finally {
fsd.readUnlock();
}
QuotaUsage usage = getQuotaUsageInt(fsd, iip);
if (usage != null) {
return usage;
} else {
//If quota isn't set, fall back to getContentSummary.
return getContentSummaryInt(fsd, iip);
}
}
private static QuotaUsage getQuotaUsageInt(FSDirectory fsd, INodesInPath iip)
throws IOException {
fsd.readLock();
try {
INode targetNode = iip.getLastINode();
QuotaUsage usage = null;
if (targetNode.isDirectory()) {
DirectoryWithQuotaFeature feature =
targetNode.asDirectory().getDirectoryWithQuotaFeature();
if (feature != null) {
QuotaCounts counts = feature.getSpaceConsumed();
QuotaCounts quotas = feature.getQuota();
usage = new QuotaUsage.Builder().
fileAndDirectoryCount(counts.getNameSpace()).
quota(quotas.getNameSpace()).
spaceConsumed(counts.getStorageSpace()).
spaceQuota(quotas.getStorageSpace()).
typeConsumed(counts.getTypeSpaces().asArray()).
typeQuota(quotas.getTypeSpaces().asArray()).build();
}
}
return usage;
} finally {
fsd.readUnlock();
}
}
static class GetBlockLocationsResult {
final boolean updateAccessTime;
final LocatedBlocks blocks;


@ -153,6 +153,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.ha.ServiceFailedException;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@ -2853,6 +2854,36 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
}
}
/**
* Get the quota usage for a specific file/dir.
*
* @param src The string representation of the path to the file
*
* @throws AccessControlException if access is denied
* @throws UnresolvedLinkException if a symlink is encountered.
* @throws FileNotFoundException if no file exists
* @throws StandbyException
* @throws IOException for issues with writing to the audit log
*
* @return object containing the quota and usage information for the path
*/
QuotaUsage getQuotaUsage(final String src) throws IOException {
checkOperation(OperationCategory.READ);
readLock();
boolean success = true;
try {
checkOperation(OperationCategory.READ);
return FSDirStatAndListingOp.getQuotaUsage(dir, src);
} catch (AccessControlException ace) {
success = false;
throw ace;
} finally {
readUnlock();
logAuditEvent(success, "quotaUsage", src);
}
}
/**
* Set the namespace quota and storage space quota for a directory.
* See {@link ClientProtocol#setQuota(String, long, long, StorageType)} for the


@ -63,6 +63,7 @@ import org.apache.hadoop.fs.permission.AclStatus;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.HealthCheckFailedException;
import org.apache.hadoop.ha.ServiceFailedException;
@ -1265,6 +1266,12 @@ class NameNodeRpcServer implements NamenodeProtocols {
return namesystem.getContentSummary(path);
}
@Override // ClientProtocol
public QuotaUsage getQuotaUsage(String path) throws IOException {
checkNNStartup();
return namesystem.getQuotaUsage(path);
}
@Override // ClientProtocol
public void setQuota(String path, long namespaceQuota, long storagespaceQuota,
StorageType type)


@ -29,6 +29,7 @@ import static org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_CLIENT_WRIT
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import java.io.IOException;
@ -44,6 +45,8 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
import org.apache.hadoop.fs.FsConstants;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.AfterClass;
@ -161,6 +164,54 @@ public class TestViewFsDefaultValue {
assertEquals(500, cs.getSpaceQuota());
}
/**
* Test that quota usage can be retrieved on the client side.
*/
@Test
public void testGetQuotaUsage() throws IOException {
FileSystem hFs = cluster.getFileSystem(0);
final DistributedFileSystem dfs = (DistributedFileSystem)hFs;
dfs.setQuota(testFileDirPath, 100, 500);
QuotaUsage qu = vfs.getQuotaUsage(testFileDirPath);
assertEquals(100, qu.getQuota());
assertEquals(500, qu.getSpaceQuota());
}
/**
* Test that quota usage can be retrieved on the client side when
* storage type quotas are defined.
*/
@Test
public void testGetQuotaUsageWithStorageTypes() throws IOException {
FileSystem hFs = cluster.getFileSystem(0);
final DistributedFileSystem dfs = (DistributedFileSystem)hFs;
dfs.setQuotaByStorageType(testFileDirPath, StorageType.SSD, 500);
dfs.setQuotaByStorageType(testFileDirPath, StorageType.DISK, 600);
QuotaUsage qu = vfs.getQuotaUsage(testFileDirPath);
assertEquals(500, qu.getTypeQuota(StorageType.SSD));
assertEquals(600, qu.getTypeQuota(StorageType.DISK));
}
/**
* Test that quota usage can be retrieved on the client side when
* no quota is defined.
*/
@Test
public void testGetQuotaUsageWithQuotaDefined() throws IOException {
FileSystem hFs = cluster.getFileSystem(0);
final DistributedFileSystem dfs = (DistributedFileSystem)hFs;
dfs.setQuota(testFileDirPath, -1, -1);
dfs.setQuotaByStorageType(testFileDirPath, StorageType.SSD, -1);
dfs.setQuotaByStorageType(testFileDirPath, StorageType.DISK, -1);
QuotaUsage qu = vfs.getQuotaUsage(testFileDirPath);
assertEquals(-1, qu.getTypeQuota(StorageType.SSD));
assertEquals(-1, qu.getQuota());
assertEquals(-1, qu.getSpaceQuota());
assertEquals(2, qu.getFileAndDirectoryCount());
assertEquals(0, qu.getTypeConsumed(StorageType.SSD));
assertTrue(qu.getSpaceConsumed() > 0);
}
@AfterClass
public static void cleanup() throws IOException {
fHdfs.delete(new Path(testFileName), true);
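
The assertions above rely on -1 as the sentinel for "quota not set". A small illustrative helper, using only the QuotaUsage accessors exercised in these tests (the helper name and report format are made up for the example):

  // Hypothetical helper: render a QuotaUsage, treating -1 as "no quota set".
  static String describe(QuotaUsage qu) {
    String nameQuota =
        qu.getQuota() < 0 ? "none" : Long.toString(qu.getQuota());
    String spaceQuota =
        qu.getSpaceQuota() < 0 ? "none" : Long.toString(qu.getSpaceQuota());
    return "names: " + qu.getFileAndDirectoryCount() + " / " + nameQuota
        + ", space: " + qu.getSpaceConsumed() + " / " + spaceQuota + " bytes";
  }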

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.QuotaUsage;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
@ -134,6 +135,7 @@ public class TestQuota {
// 4: count -q /test
ContentSummary c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 3);
assertEquals(c.getSpaceConsumed(), fileLen*replication);
@ -141,10 +143,12 @@ public class TestQuota {
// 5: count -q /test/data0
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
assertEquals(c.getQuota(), -1);
// check disk space consumed
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(c.getSpaceConsumed(), fileLen*replication);
// 6: create a directory /test/data1
@ -172,12 +176,14 @@ public class TestQuota {
// 8: clear quota /test
runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(c.getQuota(), -1);
assertEquals(c.getSpaceQuota(), spaceQuota);
// 9: clear quota /test/data0
runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, false);
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(c.getQuota(), -1);
// 10: create a file /test/datafile1
@ -198,6 +204,7 @@ public class TestQuota {
// 9.s: clear diskspace quota
runCommand(admin, false, "-clrSpaceQuota", parent.toString());
c = dfs.getContentSummary(parent);
compareQuotaUsage(c, dfs, parent);
assertEquals(c.getQuota(), -1);
assertEquals(c.getSpaceQuota(), -1);
@ -224,6 +231,7 @@ public class TestQuota {
}
assertTrue(hasException);
c = dfs.getContentSummary(childDir0);
compareQuotaUsage(c, dfs, childDir0);
assertEquals(c.getDirectoryCount()+c.getFileCount(), 1);
assertEquals(c.getQuota(), 1);
@ -362,7 +370,7 @@ public class TestQuota {
}
assertTrue(hasException);
assertEquals(4, cluster.getNamesystem().getFSDirectory().getYieldCount());
assertEquals(5, cluster.getNamesystem().getFSDirectory().getYieldCount());
} finally {
cluster.shutdown();
}
@ -387,6 +395,7 @@ public class TestQuota {
final Path quotaDir1 = new Path("/nqdir0/qdir1");
dfs.setQuota(quotaDir1, 6, HdfsConstants.QUOTA_DONT_SET);
ContentSummary c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 6);
@ -394,6 +403,7 @@ public class TestQuota {
final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir2, 7, HdfsConstants.QUOTA_DONT_SET);
c = dfs.getContentSummary(quotaDir2);
compareQuotaUsage(c, dfs, quotaDir2);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 7);
@ -402,6 +412,7 @@ public class TestQuota {
assertTrue(dfs.mkdirs(quotaDir3));
dfs.setQuota(quotaDir3, 2, HdfsConstants.QUOTA_DONT_SET);
c = dfs.getContentSummary(quotaDir3);
compareQuotaUsage(c, dfs, quotaDir3);
assertEquals(c.getDirectoryCount(), 1);
assertEquals(c.getQuota(), 2);
@ -409,6 +420,7 @@ public class TestQuota {
Path tempPath = new Path(quotaDir3, "nqdir32");
assertTrue(dfs.mkdirs(tempPath));
c = dfs.getContentSummary(quotaDir3);
compareQuotaUsage(c, dfs, quotaDir3);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 2);
@ -422,6 +434,7 @@ public class TestQuota {
}
assertTrue(hasException);
c = dfs.getContentSummary(quotaDir3);
compareQuotaUsage(c, dfs, quotaDir3);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 2);
@ -429,9 +442,11 @@ public class TestQuota {
tempPath = new Path(quotaDir2, "nqdir31");
assertTrue(dfs.mkdirs(tempPath));
c = dfs.getContentSummary(quotaDir2);
compareQuotaUsage(c, dfs, quotaDir2);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getDirectoryCount(), 6);
assertEquals(c.getQuota(), 6);
@ -449,9 +464,11 @@ public class TestQuota {
tempPath = new Path(quotaDir2, "nqdir30");
dfs.rename(new Path(quotaDir3, "nqdir32"), tempPath);
c = dfs.getContentSummary(quotaDir2);
compareQuotaUsage(c, dfs, quotaDir2);
assertEquals(c.getDirectoryCount(), 4);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getDirectoryCount(), 6);
assertEquals(c.getQuota(), 6);
@ -480,9 +497,11 @@ public class TestQuota {
// 11: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0
assertTrue(dfs.rename(tempPath, new Path("/nqdir0")));
c = dfs.getContentSummary(quotaDir2);
compareQuotaUsage(c, dfs, quotaDir2);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getDirectoryCount(), 4);
assertEquals(c.getQuota(), 6);
@ -501,31 +520,38 @@ public class TestQuota {
// 14: Move /nqdir0/qdir1/qdir21 /nqdir0/qdir1/qdir20
assertTrue(dfs.rename(quotaDir3, quotaDir2));
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getDirectoryCount(), 4);
assertEquals(c.getQuota(), 6);
c = dfs.getContentSummary(quotaDir2);
compareQuotaUsage(c, dfs, quotaDir2);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 7);
tempPath = new Path(quotaDir2, "qdir21");
c = dfs.getContentSummary(tempPath);
compareQuotaUsage(c, dfs, tempPath);
assertEquals(c.getDirectoryCount(), 1);
assertEquals(c.getQuota(), 2);
// 15: Delete /nqdir0/qdir1/qdir20/qdir21
dfs.delete(tempPath, true);
c = dfs.getContentSummary(quotaDir2);
compareQuotaUsage(c, dfs, quotaDir2);
assertEquals(c.getDirectoryCount(), 2);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getDirectoryCount(), 3);
assertEquals(c.getQuota(), 6);
// 16: Move /nqdir0/qdir30 /nqdir0/qdir1/qdir20
assertTrue(dfs.rename(new Path("/nqdir0/nqdir30"), quotaDir2));
c = dfs.getContentSummary(quotaDir2);
compareQuotaUsage(c, dfs, quotaDir2);
assertEquals(c.getDirectoryCount(), 5);
assertEquals(c.getQuota(), 7);
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getDirectoryCount(), 6);
assertEquals(c.getQuota(), 6);
assertEquals(14, cluster.getNamesystem().getFSDirectory().getYieldCount());
@ -567,12 +593,14 @@ public class TestQuota {
final Path quotaDir1 = new Path("/nqdir0/qdir1");
dfs.setQuota(quotaDir1, HdfsConstants.QUOTA_DONT_SET, 4 * fileSpace);
ContentSummary c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getSpaceQuota(), 4 * fileSpace);
// set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
final Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
dfs.setQuota(quotaDir20, HdfsConstants.QUOTA_DONT_SET, 6 * fileSpace);
c = dfs.getContentSummary(quotaDir20);
compareQuotaUsage(c, dfs, quotaDir20);
assertEquals(c.getSpaceQuota(), 6 * fileSpace);
// Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
@ -580,6 +608,7 @@ public class TestQuota {
assertTrue(dfs.mkdirs(quotaDir21));
dfs.setQuota(quotaDir21, HdfsConstants.QUOTA_DONT_SET, 2 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceQuota(), 2 * fileSpace);
// 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
@ -590,6 +619,7 @@ public class TestQuota {
DFSTestUtil.createFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen,
replication, 0);
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), fileSpace);
// Create a larger file /nqdir0/qdir1/qdir21/nqdir33/
@ -604,11 +634,13 @@ public class TestQuota {
// delete nqdir33
assertTrue(dfs.delete(new Path(quotaDir21, "nqdir33"), true));
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), fileSpace);
assertEquals(c.getSpaceQuota(), 2*fileSpace);
// Verify space before the move:
c = dfs.getContentSummary(quotaDir20);
compareQuotaUsage(c, dfs, quotaDir20);
assertEquals(c.getSpaceConsumed(), 0);
// Move /nqdir0/qdir1/qdir21/nqdir32 /nqdir0/qdir1/qdir20/nqdir30
@ -621,9 +653,11 @@ public class TestQuota {
assertEquals(c.getSpaceConsumed(), fileSpace);
// verify space for its parent
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getSpaceConsumed(), fileSpace);
// verify space for source for the move
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
final Path file2 = new Path(dstPath, "fileDir/file2");
@ -634,6 +668,7 @@ public class TestQuota {
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
// Reverse: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/
@ -654,16 +689,19 @@ public class TestQuota {
c = dfs.getContentSummary(quotaDir20);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
c = dfs.getContentSummary(quotaDir21);
compareQuotaUsage(c, dfs, quotaDir21);
assertEquals(c.getSpaceConsumed(), 0);
// Test Append :
// verify space quota
c = dfs.getContentSummary(quotaDir1);
compareQuotaUsage(c, dfs, quotaDir1);
assertEquals(c.getSpaceQuota(), 4 * fileSpace);
// verify space before append;
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
OutputStream out = dfs.append(file2);
@ -675,6 +713,7 @@ public class TestQuota {
// verify space after append;
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
// now increase the quota for quotaDir1
@ -696,6 +735,7 @@ public class TestQuota {
// verify space after partial append
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
// Test set replication :
@ -705,6 +745,7 @@ public class TestQuota {
// verify that space is reduced by file2Len
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
// now try to increase the replication and expect an error.
@ -718,6 +759,7 @@ public class TestQuota {
// verify space consumed remains unchanged.
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
// now increase the quota for quotaDir1 and quotaDir20
@ -728,6 +770,7 @@ public class TestQuota {
dfs.setReplication(file2, (short)(replication+1));
// verify increase in space
c = dfs.getContentSummary(dstPath);
compareQuotaUsage(c, dfs, dstPath);
assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
// Test HDFS-2053 :
@ -755,32 +798,37 @@ public class TestQuota {
dfs.setQuota(quotaDir2053_C, HdfsConstants.QUOTA_DONT_SET,
(sizeFactorC + 1) * fileSpace);
c = dfs.getContentSummary(quotaDir2053_C);
compareQuotaUsage(c, dfs, quotaDir2053_C);
assertEquals(c.getSpaceQuota(), (sizeFactorC + 1) * fileSpace);
// Create a file under subdirectory A
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_A, "fileA"),
sizeFactorA * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_A);
compareQuotaUsage(c, dfs, quotaDir2053_A);
assertEquals(c.getSpaceConsumed(), sizeFactorA * fileSpace);
// Create a file under subdirectory B
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_B, "fileB"),
sizeFactorB * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_B);
compareQuotaUsage(c, dfs, quotaDir2053_B);
assertEquals(c.getSpaceConsumed(), sizeFactorB * fileSpace);
// Create a file under subdirectory C (which has a space quota)
DFSTestUtil.createFile(dfs, new Path(quotaDir2053_C, "fileC"),
sizeFactorC * fileLen, replication, 0);
c = dfs.getContentSummary(quotaDir2053_C);
compareQuotaUsage(c, dfs, quotaDir2053_C);
assertEquals(c.getSpaceConsumed(), sizeFactorC * fileSpace);
// Check space consumed for /hdfs-2053
c = dfs.getContentSummary(quotaDir2053);
compareQuotaUsage(c, dfs, quotaDir2053);
assertEquals(c.getSpaceConsumed(),
(sizeFactorA + sizeFactorB + sizeFactorC) * fileSpace);
assertEquals(20, cluster.getNamesystem().getFSDirectory().getYieldCount());
assertEquals(28, cluster.getNamesystem().getFSDirectory().getYieldCount());
} finally {
cluster.shutdown();
}
@ -864,21 +912,25 @@ public class TestQuota {
// setting namespace quota to Long.MAX_VALUE - 1 should work
dfs.setQuota(testFolder, Long.MAX_VALUE - 1, 10);
ContentSummary c = dfs.getContentSummary(testFolder);
compareQuotaUsage(c, dfs, testFolder);
assertTrue("Quota not set properly", c.getQuota() == Long.MAX_VALUE - 1);
// setting diskspace quota to Long.MAX_VALUE - 1 should work
dfs.setQuota(testFolder, 10, Long.MAX_VALUE - 1);
c = dfs.getContentSummary(testFolder);
compareQuotaUsage(c, dfs, testFolder);
assertTrue("Quota not set properly", c.getSpaceQuota() == Long.MAX_VALUE - 1);
// setting namespace quota to Long.MAX_VALUE should not work + no error
dfs.setQuota(testFolder, Long.MAX_VALUE, 10);
c = dfs.getContentSummary(testFolder);
compareQuotaUsage(c, dfs, testFolder);
assertTrue("Quota should not have changed", c.getQuota() == 10);
// setting diskspace quota to Long.MAX_VALUE should not work + no error
dfs.setQuota(testFolder, 10, Long.MAX_VALUE);
c = dfs.getContentSummary(testFolder);
compareQuotaUsage(c, dfs, testFolder);
assertTrue("Quota should not have changed", c.getSpaceQuota() == 10);
// setting namespace quota to Long.MAX_VALUE + 1 should not work + error
@ -942,6 +994,7 @@ public class TestQuota {
DFSTestUtil.createFile(fs, file1, FILE_SIZE, (short) 3, 1L);
DFSTestUtil.waitReplication(fs, file1, (short) 3);
c = fs.getContentSummary(dir);
compareQuotaUsage(c, fs, dir);
checkContentSummary(c, webhdfs.getContentSummary(dir));
assertEquals("Quota is half consumed", QUOTA_SIZE / 2,
c.getSpaceConsumed());
@ -1027,6 +1080,7 @@ public class TestQuota {
// Should account for all 59 files (almost QUOTA_SIZE)
c = fs.getContentSummary(dir);
compareQuotaUsage(c, fs, dir);
checkContentSummary(c, webhdfs.getContentSummary(dir));
assertEquals("Invalid space consumed", 59 * FILE_SIZE * 3,
c.getSpaceConsumed());
@ -1088,6 +1142,7 @@ public class TestQuota {
FSDataOutputStream out = dfs.create(new Path("/Folder2/file6"),(short)1);
out.close();
ContentSummary contentSummary = dfs.getContentSummary(new Path("/"));
compareQuotaUsage(contentSummary, dfs, new Path("/"));
assertEquals(6, contentSummary.getFileCount());
} finally {
if (cluster != null) {
@ -1097,4 +1152,11 @@ public class TestQuota {
}
}
// Check that the QuotaUsage obtained from getContentSummary matches the
// one returned by getQuotaUsage.
private void compareQuotaUsage(final QuotaUsage fromContentSummary,
final FileSystem fileSystem, final Path filePath) throws IOException {
QuotaUsage quotaUsage = fileSystem.getQuotaUsage(filePath);
assertEquals(fromContentSummary, quotaUsage);
}
}
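
compareQuotaUsage above leans on ContentSummary being usable as a QuotaUsage. Spelled out field by field, the same consistency check looks roughly like this sketch, which assumes the same imports as TestQuota and reuses a dfs handle and a directory Path dir from the tests above:

  ContentSummary summary = dfs.getContentSummary(dir);
  QuotaUsage usage = dfs.getQuotaUsage(dir);
  assertEquals(summary.getQuota(), usage.getQuota());
  assertEquals(summary.getSpaceQuota(), usage.getSpaceQuota());
  assertEquals(summary.getSpaceConsumed(), usage.getSpaceConsumed());
  assertEquals(summary.getFileCount() + summary.getDirectoryCount(),
      usage.getFileAndDirectoryCount());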

View File

@ -139,11 +139,23 @@ public class TestQuotasWithHA {
* exception.
*/
@Test(expected = StandbyException.class)
public void testgetContentSummaryOnStandby() throws Exception {
public void testGetContentSummaryOnStandby() throws Exception {
Configuration nn1conf =cluster.getConfiguration(1);
// just reset the standby reads to default i.e False on standby.
HAUtil.setAllowStandbyReads(nn1conf, false);
cluster.restartNameNode(1);
cluster.getNameNodeRpc(1).getContentSummary("/");
}
/**
* Test that getQuotaUsage on the standby NameNode throws StandbyException.
*/
@Test(expected = StandbyException.class)
public void testGetQuotaUsageOnStandby() throws Exception {
Configuration nn1conf = cluster.getConfiguration(1);
// Reset standby reads to their default (false) on the standby.
HAUtil.setAllowStandbyReads(nn1conf, false);
cluster.restartNameNode(1);
cluster.getNameNodeRpc(1).getQuotaUsage("/");
}
}
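
For contrast, a hypothetical companion test (not part of this change), assuming the same fixture as the class above plus imports for QuotaUsage and assertTrue: with standby reads explicitly allowed, the read-only getQuotaUsage call would be expected to pass the operation check on the standby instead of throwing.

  @Test
  public void testGetQuotaUsageWithStandbyReadsAllowed() throws Exception {
    Configuration nn1conf = cluster.getConfiguration(1);
    // Allow reads on the standby so the READ-category RPC is not rejected
    // with StandbyException.
    HAUtil.setAllowStandbyReads(nn1conf, true);
    cluster.restartNameNode(1);
    QuotaUsage qu = cluster.getNameNodeRpc(1).getQuotaUsage("/");
    assertTrue(qu.getFileAndDirectoryCount() >= 1);
  }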