From 5f458ef23f097c784f12a973b326f7e1254ae0b2 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Thu, 21 Nov 2013 03:17:47 +0000
Subject: [PATCH] HDFS-5531. Combine the getNsQuota() and getDsQuota() methods
 in INode.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1544018 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 ++
 .../hadoop/hdfs/server/namenode/Content.java  |  2 +-
 .../hdfs/server/namenode/FSDirectory.java     | 10 +++--
 .../hadoop/hdfs/server/namenode/FSImage.java  | 12 +++--
 .../hdfs/server/namenode/FSImageFormat.java   |  5 ++-
 .../server/namenode/FSImageSerialization.java | 14 +++---
 .../hadoop/hdfs/server/namenode/INode.java    | 18 ++++----
 .../hdfs/server/namenode/INodeDirectory.java  |  3 +-
 .../namenode/INodeDirectoryAttributes.java    | 31 +++++--------
 .../namenode/INodeDirectoryWithQuota.java     | 24 ++++------
 .../hdfs/server/namenode/INodeReference.java  |  9 +---
 .../server/namenode/NamenodeJspHelper.java    |  4 +-
 .../hadoop/hdfs/server/namenode/Quota.java    |  2 +-
 .../snapshot/INodeDirectoryWithSnapshot.java  |  2 +-
 .../apache/hadoop/hdfs/util/EnumCounters.java | 44 ++++++++++++++-----
 .../hdfs/server/namenode/FSImageTestUtil.java |  2 +-
 16 files changed, 98 insertions(+), 87 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8f5b25868c8..72bc85e4c6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -207,6 +207,9 @@ Trunk (Unreleased)
     HDFS-5451. Add byte and file statistics to PathBasedCacheEntry.
     (Colin Patrick McCabe via Andrew Wang)
 
+    HDFS-5531. Combine the getNsQuota() and getDsQuota() methods in INode.
+    (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
index 5dfa3e952f5..c1caae52ec0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java
@@ -47,7 +47,7 @@ public enum Content {
     }
 
     private Counts() {
-      super(Content.values());
+      super(Content.class);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 9ad24f53ee8..2281becc221 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -2406,8 +2406,9 @@ public class FSDirectory implements Closeable {
     if (dirNode.isRoot() && nsQuota == HdfsConstants.QUOTA_RESET) {
       throw new IllegalArgumentException("Cannot clear namespace quota on root.");
     } else { // a directory inode
-      long oldNsQuota = dirNode.getNsQuota();
-      long oldDsQuota = dirNode.getDsQuota();
+      final Quota.Counts oldQuota = dirNode.getQuotaCounts();
+      final long oldNsQuota = oldQuota.get(Quota.NAMESPACE);
+      final long oldDsQuota = oldQuota.get(Quota.DISKSPACE);
       if (nsQuota == HdfsConstants.QUOTA_DONT_SET) {
         nsQuota = oldNsQuota;
       }
@@ -2459,8 +2460,9 @@ public class FSDirectory implements Closeable {
     try {
       INodeDirectory dir = unprotectedSetQuota(src, nsQuota, dsQuota);
       if (dir != null) {
-        fsImage.getEditLog().logSetQuota(src, dir.getNsQuota(),
-            dir.getDsQuota());
+        final Quota.Counts q = dir.getQuotaCounts();
+        fsImage.getEditLog().logSetQuota(src,
+            q.get(Quota.NAMESPACE), q.get(Quota.DISKSPACE));
       }
     } finally {
       writeUnlock();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index e005e19b26c..dd37cdad815 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -777,18 +777,22 @@ public class FSImage implements Closeable {
 
     if (dir.isQuotaSet()) {
       // check if quota is violated. It indicates a software bug.
+      final Quota.Counts q = dir.getQuotaCounts();
+
       final long namespace = counts.get(Quota.NAMESPACE) - parentNamespace;
-      if (Quota.isViolated(dir.getNsQuota(), namespace)) {
+      final long nsQuota = q.get(Quota.NAMESPACE);
+      if (Quota.isViolated(nsQuota, namespace)) {
         LOG.error("BUG: Namespace quota violation in image for "
             + dir.getFullPathName()
-            + " quota = " + dir.getNsQuota() + " < consumed = " + namespace);
+            + " quota = " + nsQuota + " < consumed = " + namespace);
       }
 
       final long diskspace = counts.get(Quota.DISKSPACE) - parentDiskspace;
-      if (Quota.isViolated(dir.getDsQuota(), diskspace)) {
+      final long dsQuota = q.get(Quota.DISKSPACE);
+      if (Quota.isViolated(dsQuota, diskspace)) {
         LOG.error("BUG: Diskspace quota violation in image for "
             + dir.getFullPathName()
-            + " quota = " + dir.getDsQuota() + " < consumed = " + diskspace);
+            + " quota = " + dsQuota + " < consumed = " + diskspace);
       }
 
       ((INodeDirectoryWithQuota)dir).setSpaceConsumed(namespace, diskspace);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 3b0427a1e65..df795a4e070 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -371,8 +371,9 @@ public class FSImageFormat {
 
     /** Update the root node's attributes */
     private void updateRootAttr(INodeWithAdditionalFields root) {
-      long nsQuota = root.getNsQuota();
-      long dsQuota = root.getDsQuota();
+      final Quota.Counts q = root.getQuotaCounts();
+      final long nsQuota = q.get(Quota.NAMESPACE);
+      final long dsQuota = q.get(Quota.DISKSPACE);
       FSDirectory fsDir = namesystem.dir;
       if (nsQuota != -1 || dsQuota != -1) {
         fsDir.rootDir.setQuota(nsQuota, dsQuota);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 51ab61b2171..9c94d60e203 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -219,6 +219,12 @@ public class FSImageSerialization {
     out.writeLong(file.getPreferredBlockSize());
   }
 
+  private static void writeQuota(Quota.Counts quota, DataOutput out)
+      throws IOException {
+    out.writeLong(quota.get(Quota.NAMESPACE));
+    out.writeLong(quota.get(Quota.DISKSPACE));
+  }
+
   /**
    * Serialize a {@link INodeDirectory}
   * @param node The node to write
@@ -234,8 +240,8 @@ public class FSImageSerialization {
 
     out.writeLong(0);   // preferred block size
     out.writeInt(-1);   // # of blocks
-    out.writeLong(node.getNsQuota());
-    out.writeLong(node.getDsQuota());
+    writeQuota(node.getQuotaCounts(), out);
+
     if (node instanceof INodeDirectorySnapshottable) {
       out.writeBoolean(true);
     } else {
@@ -256,9 +262,7 @@ public class FSImageSerialization {
     writeLocalName(a, out);
     writePermissionStatus(a, out);
     out.writeLong(a.getModificationTime());
-
-    out.writeLong(a.getNsQuota());
-    out.writeLong(a.getDsQuota());
+    writeQuota(a.getQuotaCounts(), out);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 1aff9784aba..e5f26b08b1e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -383,10 +383,11 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
   public final ContentSummary computeAndConvertContentSummary(
       ContentSummaryComputationContext summary) {
     Content.Counts counts = computeContentSummary(summary).getCounts();
+    final Quota.Counts q = getQuotaCounts();
     return new ContentSummary(counts.get(Content.LENGTH),
         counts.get(Content.FILE) + counts.get(Content.SYMLINK),
-        counts.get(Content.DIRECTORY), getNsQuota(),
-        counts.get(Content.DISKSPACE), getDsQuota());
+        counts.get(Content.DIRECTORY), q.get(Quota.NAMESPACE),
+        counts.get(Content.DISKSPACE), q.get(Quota.DISKSPACE));
   }
 
   /**
@@ -412,18 +413,15 @@ public abstract class INode implements INodeAttributes, Diff.Element<byte[]> {
 
   /**
    * Get the quota set for this inode
-   * @return the quota if it is set; -1 otherwise
+   * @return the quota counts. The count is -1 if it is not set.
    */
-  public long getNsQuota() {
-    return -1;
-  }
-
-  public long getDsQuota() {
-    return -1;
+  public Quota.Counts getQuotaCounts() {
+    return Quota.Counts.newInstance(-1, -1);
   }
 
   public final boolean isQuotaSet() {
-    return getNsQuota() >= 0 || getDsQuota() >= 0;
+    final Quota.Counts q = getQuotaCounts();
+    return q.get(Quota.NAMESPACE) >= 0 || q.get(Quota.DISKSPACE) >= 0;
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 0355b307c42..8133de46985 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -612,8 +612,7 @@ public class INodeDirectory extends INodeWithAdditionalFields
   @Override
   public boolean metadataEquals(INodeDirectoryAttributes other) {
     return other != null
-        && getNsQuota() == other.getNsQuota()
-        && getDsQuota() == other.getDsQuota()
+        && getQuotaCounts().equals(other.getQuotaCounts())
         && getPermissionLong() == other.getPermissionLong();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
index 5bff5cdaf51..b0ea44bd811 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryAttributes.java
@@ -27,9 +27,7 @@ import com.google.common.base.Preconditions;
  */
 @InterfaceAudience.Private
 public interface INodeDirectoryAttributes extends INodeAttributes {
-  public long getNsQuota();
-
-  public long getDsQuota();
+  public Quota.Counts getQuotaCounts();
 
   public boolean metadataEquals(INodeDirectoryAttributes other);
 
@@ -46,20 +44,14 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
     }
 
     @Override
-    public long getNsQuota() {
-      return -1;
-    }
-
-    @Override
-    public long getDsQuota() {
-      return -1;
+    public Quota.Counts getQuotaCounts() {
+      return Quota.Counts.newInstance(-1, -1);
     }
 
     @Override
     public boolean metadataEquals(INodeDirectoryAttributes other) {
       return other != null
-          && getNsQuota() == other.getNsQuota()
-          && getDsQuota() == other.getDsQuota()
+          && this.getQuotaCounts().equals(other.getQuotaCounts())
           && getPermissionLong() == other.getPermissionLong();
     }
   }
@@ -68,6 +60,7 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
     private final long nsQuota;
     private final long dsQuota;
 
+
     public CopyWithQuota(byte[] name, PermissionStatus permissions,
         long modificationTime, long nsQuota, long dsQuota) {
       super(name, permissions, modificationTime);
@@ -78,18 +71,14 @@ public interface INodeDirectoryAttributes extends INodeAttributes {
     public CopyWithQuota(INodeDirectory dir) {
       super(dir);
       Preconditions.checkArgument(dir.isQuotaSet());
-      this.nsQuota = dir.getNsQuota();
-      this.dsQuota = dir.getDsQuota();
+      final Quota.Counts q = dir.getQuotaCounts();
+      this.nsQuota = q.get(Quota.NAMESPACE);
+      this.dsQuota = q.get(Quota.DISKSPACE);
     }
 
     @Override
-    public final long getNsQuota() {
-      return nsQuota;
-    }
-
-    @Override
-    public final long getDsQuota() {
-      return dsQuota;
+    public Quota.Counts getQuotaCounts() {
+      return Quota.Counts.newInstance(nsQuota, dsQuota);
     }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
index e18420df4fb..41f1984f77e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
@@ -44,7 +44,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
    * @param dsQuota Diskspace quota to be assigned to this indoe
    * @param other The other inode from which all other properties are copied
    */
-  public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
+  INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
       long nsQuota, long dsQuota) {
     super(other, adopt);
     final Quota.Counts counts = other.computeQuotaUsage();
@@ -54,6 +54,11 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
     this.dsQuota = dsQuota;
   }
 
+  public INodeDirectoryWithQuota(INodeDirectory other, boolean adopt,
+      Quota.Counts quota) {
+    this(other, adopt, quota.get(Quota.NAMESPACE), quota.get(Quota.DISKSPACE));
+  }
+
   /** constructor with no quota verification */
   INodeDirectoryWithQuota(long id, byte[] name, PermissionStatus permissions,
       long modificationTime, long nsQuota, long dsQuota) {
@@ -67,20 +72,9 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
     super(id, name, permissions, 0L);
   }
 
-  /** Get this directory's namespace quota
-   * @return this directory's namespace quota
-   */
   @Override
-  public long getNsQuota() {
-    return nsQuota;
-  }
-
-  /** Get this directory's diskspace quota
-   * @return this directory's diskspace quota
-   */
-  @Override
-  public long getDsQuota() {
-    return dsQuota;
+  public Quota.Counts getQuotaCounts() {
+    return Quota.Counts.newInstance(nsQuota, dsQuota);
   }
 
   /** Set this directory's quota
@@ -120,7 +114,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
   }
 
   private void checkDiskspace(final long computed) {
-    if (-1 != getDsQuota() && diskspace != computed) {
+    if (-1 != getQuotaCounts().get(Quota.DISKSPACE) && diskspace != computed) {
       NameNode.LOG.error("BUG: Inconsistent diskspace for directory "
           + getFullPathName() + ". Cached = " + diskspace
           + " != Computed = " + computed);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
index a049c012917..f77863ada85 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java
@@ -295,15 +295,10 @@ public abstract class INodeReference extends INode {
   }
 
   @Override
-  public final long getNsQuota() {
-    return referred.getNsQuota();
+  public Quota.Counts getQuotaCounts() {
+    return referred.getQuotaCounts();
   }
 
-  @Override
-  public final long getDsQuota() {
-    return referred.getDsQuota();
-  }
-
   @Override
   public final void clear() {
     super.clear();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 50cc13732ba..6d9692de292 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -23,7 +23,6 @@ import java.io.IOException;
 import java.lang.management.ManagementFactory;
 import java.lang.management.MemoryMXBean;
 import java.lang.management.MemoryUsage;
-import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URLEncoder;
@@ -62,7 +61,6 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.Status;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step;
 import org.apache.hadoop.hdfs.server.namenode.startupprogress.StepType;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -1092,7 +1090,7 @@ class NamenodeJspHelper {
       doc.endTag();
 
       doc.startTag("ds_quota");
-      doc.pcdata(""+inode.getDsQuota());
+      doc.pcdata(""+inode.getQuotaCounts().get(Quota.DISKSPACE));
       doc.endTag();
 
       doc.startTag("permission_status");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
index 36e803a149b..7abd017dac2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Quota.java
@@ -41,7 +41,7 @@ public enum Quota {
     }
 
     Counts() {
-      super(Quota.values());
+      super(Quota.class);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
index e745412b7d4..cba65d2243c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
@@ -491,7 +491,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
 
   INodeDirectoryWithSnapshot(INodeDirectory that, boolean adopt,
       DirectoryDiffList diffs) {
-    super(that, adopt, that.getNsQuota(), that.getDsQuota());
+    super(that, adopt, that.getQuotaCounts());
     this.diffs = diffs != null? diffs: new DirectoryDiffList();
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
index 1df4e1811ee..e3975f6d68a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/EnumCounters.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.util;
 
+import java.util.Arrays;
 import java.util.HashMap;
 
 import com.google.common.base.Preconditions;
@@ -34,21 +35,19 @@ import com.google.common.base.Preconditions;
  * @param <E> the enum type
  */
 public class EnumCounters<E extends Enum<E>> {
-  /** An array of enum constants. */
-  private final E[] enumConstants;
+  /** The class of the enum. */
+  private final Class<E> enumClass;
   /** The counter array, counters[i] corresponds to the enumConstants[i]. */
   private final long[] counters;
 
   /**
    * Construct counters for the given enum constants.
-   * @param enumConstants an array of enum constants such that,
-   *        for all i, enumConstants[i].ordinal() == i.
+   * @param enumClass the enum class of the counters.
    */
-  public EnumCounters(final E[] enumConstants) {
-    for(int i = 0; i < enumConstants.length; i++) {
-      Preconditions.checkArgument(enumConstants[i].ordinal() == i);
-    }
-    this.enumConstants = enumConstants;
+  public EnumCounters(final Class<E> enumClass) {
+    final E[] enumConstants = enumClass.getEnumConstants();
+    Preconditions.checkNotNull(enumConstants);
+    this.enumClass = enumClass;
     this.counters = new long[enumConstants.length];
   }
 
@@ -69,6 +68,13 @@ public class EnumCounters<E extends Enum<E>> {
     counters[e.ordinal()] = value;
   }
 
+  /** Set this counters to that counters. */
+  public final void set(final EnumCounters<E> that) {
+    for(int i = 0; i < counters.length; i++) {
+      this.counters[i] = that.counters[i];
+    }
+  }
+
   /** Add the given value to counter e. */
   public final void add(final E e, final long value) {
     counters[e.ordinal()] += value;
@@ -86,15 +92,33 @@ public class EnumCounters<E extends Enum<E>> {
     counters[e.ordinal()] -= value;
   }
 
-  /** Subtract that counters from this counters. */
+  /** Subtract this counters from that counters. */
   public final void subtract(final EnumCounters<E> that) {
     for(int i = 0; i < counters.length; i++) {
       this.counters[i] -= that.counters[i];
     }
   }
 
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == this) {
+      return true;
+    } else if (obj == null || !(obj instanceof EnumCounters)) {
+      return false;
+    }
+    final EnumCounters<?> that = (EnumCounters<?>)obj;
+    return this.enumClass == that.enumClass
+        && Arrays.equals(this.counters, that.counters);
+  }
+
+  @Override
+  public int hashCode() {
+    return Arrays.hashCode(counters);
+  }
+
   @Override
   public String toString() {
+    final E[] enumConstants = enumClass.getEnumConstants();
     final StringBuilder b = new StringBuilder();
     for(int i = 0; i < counters.length; i++) {
       final String name = enumConstants[i].name();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
index 7c2c7e2f98c..d3f122fc817 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java
@@ -554,7 +554,7 @@ public abstract class FSImageTestUtil {
    * get NameSpace quota.
    */
   public static long getNSQuota(FSNamesystem ns) {
-    return ns.dir.rootDir.getNsQuota();
+    return ns.dir.rootDir.getQuotaCounts().get(Quota.NAMESPACE);
  }
 
  public static void assertNNFilesMatch(MiniDFSCluster cluster) throws Exception {
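Reviewer note, not part of the patch: a minimal usage sketch of the combined accessor, assuming only what the diff itself shows (the public INode#getQuotaCounts(), the Quota enum with NAMESPACE/DISKSPACE, and -1 meaning the quota is not set). The class and method names in the sketch are hypothetical.

import org.apache.hadoop.hdfs.server.namenode.INode;
import org.apache.hadoop.hdfs.server.namenode.Quota;

// Illustrative only: shows the call pattern that replaces the old
// inode.getNsQuota()/inode.getDsQuota() pair with a single Quota.Counts lookup.
class QuotaUsageSketch {
  static String describeQuota(INode inode) {
    final Quota.Counts q = inode.getQuotaCounts();  // one call instead of two
    final long nsQuota = q.get(Quota.NAMESPACE);    // -1 when no namespace quota is set
    final long dsQuota = q.get(Quota.DISKSPACE);    // -1 when no diskspace quota is set
    return "nsQuota=" + nsQuota + ", dsQuota=" + dsQuota;
  }
}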