HDFS-5630. Hook up cache directive and pool usage statistics. (wang)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1548309 13f79535-47bb-0310-9956-ffa450edef68
Author: Andrew Wang
Date: 2013-12-05 21:09:30 +00:00
Parent: ca9a32c951
Commit: 55e5b0653c
12 changed files with 450 additions and 140 deletions


@ -231,6 +231,8 @@ Trunk (Unreleased)
HDFS-5536. Implement HTTP policy for Namenode and DataNode. (Haohui Mai via
jing9)
HDFS-5630. Hook up cache directive and pool usage statistics. (wang)
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)


@ -46,7 +46,9 @@ public final class CacheDirective implements IntrusiveCollection.Element {
private long bytesNeeded;
private long bytesCached;
private long filesAffected;
private long filesNeeded;
private long filesCached;
private Element prev;
private Element next;
@ -58,9 +60,6 @@ public final class CacheDirective implements IntrusiveCollection.Element {
Preconditions.checkArgument(replication > 0);
this.replication = replication;
this.expiryTime = expiryTime;
this.bytesNeeded = 0;
this.bytesCached = 0;
this.filesAffected = 0;
}
public long getId() {
@ -112,7 +111,8 @@ public final class CacheDirective implements IntrusiveCollection.Element {
return new CacheDirectiveStats.Builder().
setBytesNeeded(bytesNeeded).
setBytesCached(bytesCached).
setFilesAffected(filesAffected).
setFilesNeeded(filesNeeded).
setFilesCached(filesCached).
setHasExpired(new Date().getTime() > expiryTime).
build();
}
@ -131,7 +131,8 @@ public final class CacheDirective implements IntrusiveCollection.Element {
append(", expiryTime: ").append(getExpiryTimeString()).
append(", bytesNeeded:").append(bytesNeeded).
append(", bytesCached:").append(bytesCached).
append(", filesAffected:").append(filesAffected).
append(", filesNeeded:").append(filesNeeded).
append(", filesCached:").append(filesCached).
append(" }");
return builder.toString();
}
@ -152,42 +153,60 @@ public final class CacheDirective implements IntrusiveCollection.Element {
return new HashCodeBuilder().append(id).toHashCode();
}
//
// Stats related getters and setters
//
/**
* Resets the byte and file statistics being tracked by this CacheDirective.
*/
public void resetStatistics() {
bytesNeeded = 0;
bytesCached = 0;
filesNeeded = 0;
filesCached = 0;
}
public long getBytesNeeded() {
return bytesNeeded;
}
public void clearBytesNeeded() {
this.bytesNeeded = 0;
}
public void addBytesNeeded(long toAdd) {
this.bytesNeeded += toAdd;
public void addBytesNeeded(long bytes) {
this.bytesNeeded += bytes;
pool.addBytesNeeded(bytes);
}
public long getBytesCached() {
return bytesCached;
}
public void clearBytesCached() {
this.bytesCached = 0;
public void addBytesCached(long bytes) {
this.bytesCached += bytes;
pool.addBytesCached(bytes);
}
public void addBytesCached(long toAdd) {
this.bytesCached += toAdd;
public long getFilesNeeded() {
return filesNeeded;
}
public long getFilesAffected() {
return filesAffected;
public void addFilesNeeded(long files) {
this.filesNeeded += files;
pool.addFilesNeeded(files);
}
public void clearFilesAffected() {
this.filesAffected = 0;
public long getFilesCached() {
return filesCached;
}
public void incrementFilesAffected() {
this.filesAffected++;
public void addFilesCached(long files) {
this.filesCached += files;
pool.addFilesCached(files);
}
//
// IntrusiveCollection.Element implementation
//
@SuppressWarnings("unchecked")
@Override // IntrusiveCollection.Element
public void insertInternal(IntrusiveCollection<? extends Element> list,
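
Taken together, the CacheDirective hunks above replace the single filesAffected counter with paired needed/cached counters whose add* methods also forward each increment to the owning CachePool. A minimal standalone sketch of that roll-up pattern (simplified stand-in classes, not the actual HDFS types):

    // Sketch of the directive -> pool statistics roll-up introduced above.
    // Pool and Directive are simplified stand-ins, not the HDFS classes.
    class Pool {
      private long bytesNeeded, bytesCached, filesNeeded, filesCached;

      void addBytesNeeded(long bytes) { bytesNeeded += bytes; }
      void addFilesCached(long files) { filesCached += files; }

      // Called at the start of every rescan so the counters cover one scan only.
      void resetStatistics() {
        bytesNeeded = bytesCached = filesNeeded = filesCached = 0;
      }
    }

    class Directive {
      private final Pool pool;
      private long bytesNeeded, filesCached;

      Directive(Pool pool) { this.pool = pool; }

      // Each increment updates the directive and its pool in one call, so the
      // two levels of statistics cannot drift apart within a scan.
      void addBytesNeeded(long bytes) {
        this.bytesNeeded += bytes;
        pool.addBytesNeeded(bytes);
      }

      void addFilesCached(long files) {
        this.filesCached += files;
        pool.addFilesCached(files);
      }
    }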


@ -29,7 +29,8 @@ public class CacheDirectiveStats {
public static class Builder {
private long bytesNeeded;
private long bytesCached;
private long filesAffected;
private long filesNeeded;
private long filesCached;
private boolean hasExpired;
/**
@ -38,8 +39,8 @@ public class CacheDirectiveStats {
* @return New CacheDirectiveStats.
*/
public CacheDirectiveStats build() {
return new CacheDirectiveStats(bytesNeeded, bytesCached, filesAffected,
hasExpired);
return new CacheDirectiveStats(bytesNeeded, bytesCached, filesNeeded,
filesCached, hasExpired);
}
/**
@ -71,13 +72,23 @@ public class CacheDirectiveStats {
}
/**
* Sets the files affected by this directive.
*
* @param filesAffected The files affected.
* Sets the files needed by this directive.
* @param filesNeeded The number of files needed.
* @return This builder, for call chaining.
*/
public Builder setFilesAffected(long filesAffected) {
this.filesAffected = filesAffected;
public Builder setFilesNeeded(long filesNeeded) {
this.filesNeeded = filesNeeded;
return this;
}
/**
* Sets the files cached by this directive.
*
* @param filesCached The number of files cached.
* @return This builder, for call chaining.
*/
public Builder setFilesCached(long filesCached) {
this.filesCached = filesCached;
return this;
}
@ -95,14 +106,16 @@ public class CacheDirectiveStats {
private final long bytesNeeded;
private final long bytesCached;
private final long filesAffected;
private final long filesNeeded;
private final long filesCached;
private final boolean hasExpired;
private CacheDirectiveStats(long bytesNeeded, long bytesCached,
long filesAffected, boolean hasExpired) {
long filesNeeded, long filesCached, boolean hasExpired) {
this.bytesNeeded = bytesNeeded;
this.bytesCached = bytesCached;
this.filesAffected = filesAffected;
this.filesNeeded = filesNeeded;
this.filesCached = filesCached;
this.hasExpired = hasExpired;
}
@ -121,10 +134,17 @@ public class CacheDirectiveStats {
}
/**
* @return The files affected.
* @return The number of files needed.
*/
public long getFilesAffected() {
return filesAffected;
public long getFilesNeeded() {
return filesNeeded;
}
/**
* @return The number of files cached.
*/
public long getFilesCached() {
return filesCached;
}
/**
@ -140,7 +160,8 @@ public class CacheDirectiveStats {
builder.append("{");
builder.append("bytesNeeded: ").append(bytesNeeded);
builder.append(", ").append("bytesCached: ").append(bytesCached);
builder.append(", ").append("filesAffected: ").append(filesAffected);
builder.append(", ").append("filesNeeded: ").append(filesNeeded);
builder.append(", ").append("filesCached: ").append(filesCached);
builder.append(", ").append("hasExpired: ").append(hasExpired);
builder.append("}");
return builder.toString();
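
CacheDirectiveStats stays an immutable value object assembled through its Builder; a short usage sketch of the API shown above (the numbers are made up for illustration):

    import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;

    // Illustrative values only; in the NameNode these come from a rescan.
    CacheDirectiveStats stats = new CacheDirectiveStats.Builder().
        setBytesNeeded(8L * 1024 * 1024).  // bytes the directive wants cached
        setBytesCached(4L * 1024 * 1024).  // bytes actually cached so far
        setFilesNeeded(2).                 // files covered by the directive
        setFilesCached(1).                 // files cached in full
        setHasExpired(false).
        build();
    assert stats.getFilesNeeded() == 2 && stats.getFilesCached() == 1;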


@ -30,7 +30,8 @@ public class CachePoolStats {
public static class Builder {
private long bytesNeeded;
private long bytesCached;
private long filesAffected;
private long filesNeeded;
private long filesCached;
public Builder() {
}
@ -45,24 +46,33 @@ public class CachePoolStats {
return this;
}
public Builder setFilesAffected(long filesAffected) {
this.filesAffected = filesAffected;
public Builder setFilesNeeded(long filesNeeded) {
this.filesNeeded = filesNeeded;
return this;
}
public Builder setFilesCached(long filesCached) {
this.filesCached = filesCached;
return this;
}
public CachePoolStats build() {
return new CachePoolStats(bytesNeeded, bytesCached, filesAffected);
return new CachePoolStats(bytesNeeded, bytesCached, filesNeeded,
filesCached);
}
};
private final long bytesNeeded;
private final long bytesCached;
private final long filesAffected;
private final long filesNeeded;
private final long filesCached;
private CachePoolStats(long bytesNeeded, long bytesCached, long filesAffected) {
private CachePoolStats(long bytesNeeded, long bytesCached, long filesNeeded,
long filesCached) {
this.bytesNeeded = bytesNeeded;
this.bytesCached = bytesCached;
this.filesAffected = filesAffected;
this.filesNeeded = filesNeeded;
this.filesCached = filesCached;
}
public long getBytesNeeded() {
@ -70,18 +80,23 @@ public class CachePoolStats {
}
public long getBytesCached() {
return bytesNeeded;
return bytesCached;
}
public long getFilesAffected() {
return filesAffected;
public long getFilesNeeded() {
return filesNeeded;
}
public long getFilesCached() {
return filesCached;
}
public String toString() {
return new StringBuilder().append("{").
append("bytesNeeded:").append(bytesNeeded).
append(", bytesCached:").append(bytesCached).
append(", filesAffected:").append(filesAffected).
append(", filesNeeded:").append(filesNeeded).
append(", filesCached:").append(filesCached).
append("}").toString();
}
}


@ -1642,7 +1642,8 @@ public class PBHelper {
CacheDirectiveStatsProto.newBuilder();
builder.setBytesNeeded(stats.getBytesNeeded());
builder.setBytesCached(stats.getBytesCached());
builder.setFilesAffected(stats.getFilesAffected());
builder.setFilesNeeded(stats.getFilesNeeded());
builder.setFilesCached(stats.getFilesCached());
builder.setHasExpired(stats.hasExpired());
return builder.build();
}
@ -1651,7 +1652,8 @@ public class PBHelper {
CacheDirectiveStats.Builder builder = new CacheDirectiveStats.Builder();
builder.setBytesNeeded(proto.getBytesNeeded());
builder.setBytesCached(proto.getBytesCached());
builder.setFilesAffected(proto.getFilesAffected());
builder.setFilesNeeded(proto.getFilesNeeded());
builder.setFilesCached(proto.getFilesCached());
builder.setHasExpired(proto.getHasExpired());
return builder.build();
}
@ -1711,7 +1713,8 @@ public class PBHelper {
CachePoolStatsProto.Builder builder = CachePoolStatsProto.newBuilder();
builder.setBytesNeeded(stats.getBytesNeeded());
builder.setBytesCached(stats.getBytesCached());
builder.setFilesAffected(stats.getFilesAffected());
builder.setFilesNeeded(stats.getFilesNeeded());
builder.setFilesCached(stats.getFilesCached());
return builder.build();
}
@ -1719,7 +1722,8 @@ public class PBHelper {
CachePoolStats.Builder builder = new CachePoolStats.Builder();
builder.setBytesNeeded(proto.getBytesNeeded());
builder.setBytesCached(proto.getBytesCached());
builder.setFilesAffected(proto.getFilesAffected());
builder.setFilesNeeded(proto.getFilesNeeded());
builder.setFilesCached(proto.getFilesCached());
return builder.build();
}
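
These four PBHelper hunks keep the Java stats classes and their protobuf messages in field-for-field sync. A hedged round-trip check of that property, assuming the overloaded PBHelper.convert methods these hunks sit inside (the method signatures themselves fall outside this excerpt):

    // Assumed: PBHelper.convert(CacheDirectiveStats) and
    // PBHelper.convert(CacheDirectiveStatsProto) exist as overloads; only
    // their bodies are visible in the hunks above.
    CacheDirectiveStats before = new CacheDirectiveStats.Builder().
        setBytesNeeded(1024).setBytesCached(512).
        setFilesNeeded(4).setFilesCached(2).setHasExpired(false).build();
    CacheDirectiveStatsProto proto = PBHelper.convert(before);
    CacheDirectiveStats after = PBHelper.convert(proto);
    // Every field should survive the round trip.
    assert after.getBytesNeeded() == before.getBytesNeeded();
    assert after.getFilesNeeded() == before.getFilesNeeded();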


@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirective;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
import org.apache.hadoop.hdfs.server.namenode.CacheManager;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
@ -198,6 +199,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
scannedBlocks = 0;
namesystem.writeLock();
try {
resetStatistics();
rescanCacheDirectives();
rescanCachedBlockMap();
blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime();
@ -206,6 +208,15 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
}
}
private void resetStatistics() {
for (CachePool pool: cacheManager.getCachePools()) {
pool.resetStatistics();
}
for (CacheDirective directive: cacheManager.getCacheDirectives()) {
directive.resetStatistics();
}
}
/**
* Scan all CacheDirectives. Use the information to figure out
* what cache replication factor each block should have.
@ -213,11 +224,9 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
private void rescanCacheDirectives() {
FSDirectory fsDir = namesystem.getFSDirectory();
final long now = new Date().getTime();
for (CacheDirective directive : cacheManager.getEntriesById().values()) {
// Reset the directive
directive.clearBytesNeeded();
directive.clearBytesCached();
directive.clearFilesAffected();
for (CacheDirective directive : cacheManager.getCacheDirectives()) {
// Reset the directive's statistics
directive.resetStatistics();
// Skip processing this entry if it has expired
LOG.info("Directive expiry is at " + directive.getExpiryTime());
if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
@ -262,26 +271,34 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
/**
* Apply a CacheDirective to a file.
*
* @param pce The CacheDirective to apply.
* @param file The file.
*
* @param directive The CacheDirective to apply.
* @param file The file.
*/
private void rescanFile(CacheDirective pce, INodeFile file) {
pce.incrementFilesAffected();
private void rescanFile(CacheDirective directive, INodeFile file) {
BlockInfo[] blockInfos = file.getBlocks();
long cachedTotal = 0;
// Increment the "needed" statistics
directive.addFilesNeeded(1);
long neededTotal = 0;
for (BlockInfo blockInfo : blockInfos) {
long neededByBlock =
directive.getReplication() * blockInfo.getNumBytes();
neededTotal += neededByBlock;
}
directive.addBytesNeeded(neededTotal);
// TODO: Enforce per-pool quotas
long cachedTotal = 0;
for (BlockInfo blockInfo : blockInfos) {
if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
// We don't try to cache blocks that are under construction.
continue;
}
long neededByBlock =
pce.getReplication() * blockInfo.getNumBytes();
neededTotal += neededByBlock;
Block block = new Block(blockInfo.getBlockId());
CachedBlock ncblock = new CachedBlock(block.getBlockId(),
pce.getReplication(), mark);
directive.getReplication(), mark);
CachedBlock ocblock = cachedBlocks.get(ncblock);
if (ocblock == null) {
cachedBlocks.put(ncblock);
@ -294,26 +311,30 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
// both get them added to their bytesCached.
List<DatanodeDescriptor> cachedOn =
ocblock.getDatanodes(Type.CACHED);
long cachedByBlock = Math.min(cachedOn.size(), pce.getReplication()) *
blockInfo.getNumBytes();
long cachedByBlock = Math.min(cachedOn.size(),
directive.getReplication()) * blockInfo.getNumBytes();
cachedTotal += cachedByBlock;
if (mark != ocblock.getMark()) {
// Mark hasn't been set in this scan, so update replication and mark.
ocblock.setReplicationAndMark(pce.getReplication(), mark);
ocblock.setReplicationAndMark(directive.getReplication(), mark);
} else {
// Mark already set in this scan. Set replication to highest value in
// any CacheDirective that covers this file.
ocblock.setReplicationAndMark((short)Math.max(
pce.getReplication(), ocblock.getReplication()), mark);
directive.getReplication(), ocblock.getReplication()), mark);
}
}
}
pce.addBytesNeeded(neededTotal);
pce.addBytesCached(cachedTotal);
// Increment the "cached" statistics
directive.addBytesCached(cachedTotal);
if (cachedTotal == neededTotal) {
directive.addFilesCached(1);
}
if (LOG.isTraceEnabled()) {
LOG.debug("Directive " + pce.getId() + " is caching " +
file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal);
LOG.trace("Directive " + directive.getId() + " is caching " +
file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal +
" bytes");
}
}
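
The accounting in rescanFile is worth spelling out: the needed total is the replication factor times the sum of block sizes, the cached total counts at most `replication` replicas per block, and a file counts toward filesCached only when the two totals match. A compact sketch with simplified inputs (plain longs instead of BlockInfo and datanode lists):

    // Simplified per-file accounting, mirroring rescanFile above.
    long replication = 2;
    long[] blockSizes     = {128, 128, 64}; // bytes per block, illustrative
    long[] cachedReplicas = {  2,   2,  1}; // replicas actually cached per block

    long neededTotal = 0, cachedTotal = 0;
    for (int i = 0; i < blockSizes.length; i++) {
      neededTotal += replication * blockSizes[i];
      // A block never contributes more cached bytes than were requested.
      cachedTotal += Math.min(cachedReplicas[i], replication) * blockSizes[i];
    }
    // neededTotal = 2*128 + 2*128 + 2*64 = 640
    // cachedTotal = 2*128 + 2*128 + 1*64 = 576
    boolean fileFullyCached = (cachedTotal == neededTotal); // false here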


@ -31,6 +31,7 @@ import java.io.DataOutput;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
@ -238,9 +239,20 @@ public final class CacheManager {
return active;
}
public TreeMap<Long, CacheDirective> getEntriesById() {
/**
* @return Unmodifiable view of the collection of CachePools.
*/
public Collection<CachePool> getCachePools() {
assert namesystem.hasReadLock();
return directivesById;
return Collections.unmodifiableCollection(cachePools.values());
}
/**
* @return Unmodifiable view of the collection of CacheDirectives.
*/
public Collection<CacheDirective> getCacheDirectives() {
assert namesystem.hasReadLock();
return Collections.unmodifiableCollection(directivesById.values());
}
@VisibleForTesting
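
Both new accessors hand out read-only views of the internal maps, so a caller such as the CacheReplicationMonitor can iterate but never mutate. A small self-contained illustration of the idiom (plain JDK, not HDFS code):

    import java.util.ArrayList;
    import java.util.Collection;
    import java.util.Collections;
    import java.util.List;

    public class UnmodifiableViewDemo {
      public static void main(String[] args) {
        List<String> internal = new ArrayList<String>();
        internal.add("directive-1");

        // The view reflects later changes to the backing list...
        Collection<String> view = Collections.unmodifiableCollection(internal);
        internal.add("directive-2");
        System.out.println(view.size()); // prints 2

        // ...but rejects mutation through the view itself.
        try {
          view.add("directive-3");
        } catch (UnsupportedOperationException expected) {
          System.out.println("view is read-only");
        }
      }
    }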


@ -70,9 +70,14 @@ public final class CachePool {
*/
@Nonnull
private FsPermission mode;
private int weight;
private long bytesNeeded;
private long bytesCached;
private long filesNeeded;
private long filesCached;
public final static class DirectiveList
extends IntrusiveCollection<CacheDirective> {
private CachePool cachePool;
@ -202,6 +207,48 @@ public final class CachePool {
setWeight(weight);
}
/**
* Resets statistics related to this CachePool.
*/
public void resetStatistics() {
bytesNeeded = 0;
bytesCached = 0;
filesNeeded = 0;
filesCached = 0;
}
public void addBytesNeeded(long bytes) {
bytesNeeded += bytes;
}
public void addBytesCached(long bytes) {
bytesCached += bytes;
}
public void addFilesNeeded(long files) {
filesNeeded += files;
}
public void addFilesCached(long files) {
filesCached += files;
}
public long getBytesNeeded() {
return bytesNeeded;
}
public long getBytesCached() {
return bytesCached;
}
public long getFilesNeeded() {
return filesNeeded;
}
public long getFilesCached() {
return filesCached;
}
/**
* Get statistics about this CachePool.
*
@ -209,9 +256,10 @@ public final class CachePool {
*/
private CachePoolStats getStats() {
return new CachePoolStats.Builder().
setBytesNeeded(0).
setBytesCached(0).
setFilesAffected(0).
setBytesNeeded(bytesNeeded).
setBytesCached(bytesCached).
setFilesNeeded(filesNeeded).
setFilesCached(filesCached).
build();
}


@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.tools.TableListing.Justification;
import org.apache.hadoop.ipc.RemoteException;
@ -477,9 +478,10 @@ public class CacheAdmin extends Configured implements Tool {
addField("EXPIRY", Justification.LEFT).
addField("PATH", Justification.LEFT);
if (printStats) {
tableBuilder.addField("NEEDED", Justification.RIGHT).
addField("CACHED", Justification.RIGHT).
addField("FILES", Justification.RIGHT);
tableBuilder.addField("BYTES_NEEDED", Justification.RIGHT).
addField("BYTES_CACHED", Justification.RIGHT).
addField("FILES_NEEDED", Justification.RIGHT).
addField("FILES_CACHED", Justification.RIGHT);
}
TableListing tableListing = tableBuilder.build();
@ -507,7 +509,8 @@ public class CacheAdmin extends Configured implements Tool {
if (printStats) {
row.add("" + stats.getBytesNeeded());
row.add("" + stats.getBytesCached());
row.add("" + stats.getFilesAffected());
row.add("" + stats.getFilesNeeded());
row.add("" + stats.getFilesCached());
}
tableListing.addRow(row.toArray(new String[0]));
numEntries++;
@ -769,13 +772,14 @@ public class CacheAdmin extends Configured implements Tool {
@Override
public String getShortUsage() {
return "[" + getName() + " [name]]\n";
return "[" + getName() + " [-stats] [<name>]]\n";
}
@Override
public String getLongUsage() {
TableListing listing = getOptionDescriptionListing();
listing.addRow("[name]", "If specified, list only the named cache pool.");
listing.addRow("-stats", "Display additional cache pool statistics.");
listing.addRow("<name>", "If specified, list only the named cache pool.");
return getShortUsage() + "\n" +
WordUtils.wrap("Display information about one or more cache pools, " +
@ -787,6 +791,7 @@ public class CacheAdmin extends Configured implements Tool {
@Override
public int run(Configuration conf, List<String> args) throws IOException {
String name = StringUtils.popFirstNonOption(args);
final boolean printStats = StringUtils.popOption("-stats", args);
if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n");
@ -794,28 +799,42 @@ public class CacheAdmin extends Configured implements Tool {
return 1;
}
DistributedFileSystem dfs = getDFS(conf);
TableListing listing = new TableListing.Builder().
TableListing.Builder builder = new TableListing.Builder().
addField("NAME", Justification.LEFT).
addField("OWNER", Justification.LEFT).
addField("GROUP", Justification.LEFT).
addField("MODE", Justification.LEFT).
addField("WEIGHT", Justification.RIGHT).
build();
addField("WEIGHT", Justification.RIGHT);
if (printStats) {
builder.
addField("BYTES_NEEDED", Justification.RIGHT).
addField("BYTES_CACHED", Justification.RIGHT).
addField("FILES_NEEDED", Justification.RIGHT).
addField("FILES_CACHED", Justification.RIGHT);
}
TableListing listing = builder.build();
int numResults = 0;
try {
RemoteIterator<CachePoolEntry> iter = dfs.listCachePools();
while (iter.hasNext()) {
CachePoolEntry entry = iter.next();
CachePoolInfo info = entry.getInfo();
String[] row = new String[5];
LinkedList<String> row = new LinkedList<String>();
if (name == null || info.getPoolName().equals(name)) {
row[0] = info.getPoolName();
row[1] = info.getOwnerName();
row[2] = info.getGroupName();
row[3] = info.getMode() != null ? info.getMode().toString() : null;
row[4] =
info.getWeight() != null ? info.getWeight().toString() : null;
listing.addRow(row);
row.add(info.getPoolName());
row.add(info.getOwnerName());
row.add(info.getGroupName());
row.add(info.getMode() != null ? info.getMode().toString() : null);
row.add(
info.getWeight() != null ? info.getWeight().toString() : null);
if (printStats) {
CachePoolStats stats = entry.getStats();
row.add(Long.toString(stats.getBytesNeeded()));
row.add(Long.toString(stats.getBytesCached()));
row.add(Long.toString(stats.getFilesNeeded()));
row.add(Long.toString(stats.getFilesCached()));
}
listing.addRow(row.toArray(new String[] {}));
++numResults;
if (name != null) {
break;
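
With these CacheAdmin changes, -listPools and -listDirectives both accept a -stats flag and print the four new columns (BYTES_NEEDED, BYTES_CACHED, FILES_NEEDED, FILES_CACHED); the tool is invoked as hdfs cacheadmin, and the XML test cases at the end of this commit exercise exactly that output.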


@ -379,8 +379,9 @@ message CacheDirectiveInfoExpirationProto {
message CacheDirectiveStatsProto {
required int64 bytesNeeded = 1;
required int64 bytesCached = 2;
required int64 filesAffected = 3;
required bool hasExpired = 4;
required int64 filesNeeded = 3;
required int64 filesCached = 4;
required bool hasExpired = 5;
}
message AddCacheDirectiveRequestProto {
@ -431,7 +432,8 @@ message CachePoolInfoProto {
message CachePoolStatsProto {
required int64 bytesNeeded = 1;
required int64 bytesCached = 2;
required int64 filesAffected = 3;
required int64 filesNeeded = 3;
required int64 filesCached = 4;
}
message AddCachePoolRequestProto {


@ -62,6 +62,7 @@ import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@ -623,45 +624,111 @@ public class TestCacheDirectives {
}, 500, 60000);
}
private static void waitForCachedStats(final DistributedFileSystem dfs,
final long targetFilesAffected, final long targetBytesNeeded,
final long targetBytesCached,
final CacheDirectiveInfo filter, final String infoString)
private static void waitForCacheDirectiveStats(final DistributedFileSystem dfs,
final long targetBytesNeeded, final long targetBytesCached,
final long targetFilesNeeded, final long targetFilesCached,
final CacheDirectiveInfo filter, final String infoString)
throws Exception {
LOG.info("Polling listDirectives{" +
((filter == null) ? "ALL" : filter.toString()) +
" for " + targetFilesAffected + " targetFilesAffected, " +
targetBytesNeeded + " targetBytesNeeded, " +
targetBytesCached + " targetBytesCached");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
RemoteIterator<CacheDirectiveEntry> iter = null;
CacheDirectiveEntry entry = null;
LOG.info("Polling listCacheDirectives " +
((filter == null) ? "ALL" : filter.toString()) + " for " +
targetBytesNeeded + " targetBytesNeeded, " +
targetBytesCached + " targetBytesCached, " +
targetFilesNeeded + " targetFilesNeeded, " +
targetFilesCached + " targetFilesCached");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
RemoteIterator<CacheDirectiveEntry> iter = null;
CacheDirectiveEntry entry = null;
try {
iter = dfs.listCacheDirectives(filter);
entry = iter.next();
} catch (IOException e) {
fail("got IOException while calling " +
"listCacheDirectives: " + e.getMessage());
}
Assert.assertNotNull(entry);
CacheDirectiveStats stats = entry.getStats();
if ((targetBytesNeeded == stats.getBytesNeeded()) &&
(targetBytesCached == stats.getBytesCached()) &&
(targetFilesNeeded == stats.getFilesNeeded()) &&
(targetFilesCached == stats.getFilesCached())) {
return true;
} else {
LOG.info(infoString + ": " +
"filesNeeded: " +
stats.getFilesNeeded() + "/" + targetFilesNeeded +
", filesCached: " +
stats.getFilesCached() + "/" + targetFilesCached +
", bytesNeeded: " +
stats.getBytesNeeded() + "/" + targetBytesNeeded +
", bytesCached: " +
stats.getBytesCached() + "/" + targetBytesCached);
return false;
}
}
}, 500, 60000);
}
private static void waitForCachePoolStats(final DistributedFileSystem dfs,
final long targetBytesNeeded, final long targetBytesCached,
final long targetFilesNeeded, final long targetFilesCached,
final CachePoolInfo pool, final String infoString)
throws Exception {
LOG.info("Polling listCachePools " + pool.toString() + " for " +
targetBytesNeeded + " targetBytesNeeded, " +
targetBytesCached + " targetBytesCached, " +
targetFilesNeeded + " targetFilesNeeded, " +
targetFilesCached + " targetFilesCached");
GenericTestUtils.waitFor(new Supplier<Boolean>() {
@Override
public Boolean get() {
RemoteIterator<CachePoolEntry> iter = null;
try {
iter = dfs.listCachePools();
} catch (IOException e) {
fail("got IOException while calling " +
"listCachePools: " + e.getMessage());
}
while (true) {
CachePoolEntry entry = null;
try {
iter = dfs.listCacheDirectives(filter);
if (!iter.hasNext()) {
break;
}
entry = iter.next();
} catch (IOException e) {
fail("got IOException while calling " +
"listCacheDirectives: " + e.getMessage());
fail("got IOException while iterating through " +
"listCachePools: " + e.getMessage());
}
Assert.assertNotNull(entry);
CacheDirectiveStats stats = entry.getStats();
if ((targetFilesAffected == stats.getFilesAffected()) &&
(targetBytesNeeded == stats.getBytesNeeded()) &&
(targetBytesCached == stats.getBytesCached())) {
if (entry == null) {
break;
}
if (!entry.getInfo().getPoolName().equals(pool.getPoolName())) {
continue;
}
CachePoolStats stats = entry.getStats();
if ((targetBytesNeeded == stats.getBytesNeeded()) &&
(targetBytesCached == stats.getBytesCached()) &&
(targetFilesNeeded == stats.getFilesNeeded()) &&
(targetFilesCached == stats.getFilesCached())) {
return true;
} else {
LOG.info(infoString + ": filesAffected: " +
stats.getFilesAffected() + "/" + targetFilesAffected +
", bytesNeeded: " +
LOG.info(infoString + ": " +
"filesNeeded: " +
stats.getFilesNeeded() + "/" + targetFilesNeeded +
", filesCached: " +
stats.getFilesCached() + "/" + targetFilesCached +
", bytesNeeded: " +
stats.getBytesNeeded() + "/" + targetBytesNeeded +
", bytesCached: " +
", bytesCached: " +
stats.getBytesCached() + "/" + targetBytesCached);
return false;
}
}
}, 500, 60000);
return false;
}
}, 500, 60000);
}
private static void checkNumCachedReplicas(final DistributedFileSystem dfs,
@ -837,7 +904,8 @@ public class TestCacheDirectives {
NameNode namenode = cluster.getNameNode();
// Create the pool
final String pool = "friendlyPool";
dfs.addCachePool(new CachePoolInfo(pool));
final CachePoolInfo poolInfo = new CachePoolInfo(pool);
dfs.addCachePool(poolInfo);
// Create some test files
final List<Path> paths = new LinkedList<Path>();
paths.add(new Path("/foo/bar"));
@ -853,6 +921,7 @@ public class TestCacheDirectives {
}
waitForCachedBlocks(namenode, 0, 0,
"testWaitForCachedReplicasInDirectory:0");
// cache entire directory
long id = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
@ -861,14 +930,20 @@ public class TestCacheDirectives {
setPool(pool).
build());
waitForCachedBlocks(namenode, 4, 8,
"testWaitForCachedReplicasInDirectory:1");
"testWaitForCachedReplicasInDirectory:1:blocks");
// Verify that listDirectives gives the stats we want.
waitForCachedStats(dfs, 2,
8 * BLOCK_SIZE, 8 * BLOCK_SIZE,
waitForCacheDirectiveStats(dfs,
4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
2, 2,
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
build(),
"testWaitForCachedReplicasInDirectory:2");
"testWaitForCachedReplicasInDirectory:1:directive");
waitForCachePoolStats(dfs,
4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
2, 2,
poolInfo, "testWaitForCachedReplicasInDirectory:1:pool");
long id2 = dfs.addCacheDirective(
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo/bar")).
@ -877,28 +952,42 @@ public class TestCacheDirectives {
build());
// wait for an additional 2 cached replicas to come up
waitForCachedBlocks(namenode, 4, 10,
"testWaitForCachedReplicasInDirectory:3");
"testWaitForCachedReplicasInDirectory:2:blocks");
// the directory directive's stats are unchanged
waitForCachedStats(dfs, 2,
8 * BLOCK_SIZE, 8 * BLOCK_SIZE,
waitForCacheDirectiveStats(dfs,
4 * numBlocksPerFile * BLOCK_SIZE, 4 * numBlocksPerFile * BLOCK_SIZE,
2, 2,
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo")).
build(),
"testWaitForCachedReplicasInDirectory:4");
"testWaitForCachedReplicasInDirectory:2:directive-1");
// verify /foo/bar's stats
waitForCachedStats(dfs, 1,
waitForCacheDirectiveStats(dfs,
4 * numBlocksPerFile * BLOCK_SIZE,
// only 3 because the file only has 3 replicas, not 4 as requested.
3 * numBlocksPerFile * BLOCK_SIZE,
1,
// only 0 because the file can't be fully cached
0,
new CacheDirectiveInfo.Builder().
setPath(new Path("/foo/bar")).
build(),
"testWaitForCachedReplicasInDirectory:5");
"testWaitForCachedReplicasInDirectory:2:directive-2");
waitForCachePoolStats(dfs,
(4+4) * numBlocksPerFile * BLOCK_SIZE,
(4+3) * numBlocksPerFile * BLOCK_SIZE,
3, 2,
poolInfo, "testWaitForCachedReplicasInDirectory:2:pool");
// remove and watch numCached go to 0
dfs.removeCacheDirective(id);
dfs.removeCacheDirective(id2);
waitForCachedBlocks(namenode, 0, 0,
"testWaitForCachedReplicasInDirectory:6");
"testWaitForCachedReplicasInDirectory:3:blocks");
waitForCachePoolStats(dfs,
0, 0,
0, 0,
poolInfo, "testWaitForCachedReplicasInDirectory:3:pool");
} finally {
cluster.shutdown();
}


@ -399,5 +399,63 @@
</comparators>
</test>
<test> <!--Tested -->
<description>Testing listing cache pool statistics</description>
<test-commands>
<cache-admin-command>-addPool foo -owner bob -group bob -mode 0664</cache-admin-command>
<cache-admin-command>-addPool bar -owner alice -group alicegroup -mode 0755</cache-admin-command>
<cache-admin-command>-listPools -stats</cache-admin-command>
</test-commands>
<cleanup-commands>
<cache-admin-command>-removePool foo</cache-admin-command>
<cache-admin-command>-removePool bar</cache-admin-command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Found 2 results.</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>bar alice alicegroup rwxr-xr-x 100 0 0 0 0</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>foo bob bob rw-rw-r-- 100 0 0 0 0</expected-output>
</comparator>
</comparators>
</test>
<test> <!--Tested -->
<description>Testing listing cache directive statistics</description>
<test-commands>
<cache-admin-command>-addPool pool1</cache-admin-command>
<cache-admin-command>-addDirective -path /foo -pool pool1 -ttl 2d</cache-admin-command>
<cache-admin-command>-addDirective -path /bar -pool pool1 -ttl 24h</cache-admin-command>
<cache-admin-command>-addDirective -path /baz -replication 2 -pool pool1 -ttl 60m</cache-admin-command>
<cache-admin-command>-listDirectives -pool pool1 -stats</cache-admin-command>
</test-commands>
<cleanup-commands>
<cache-admin-command>-removePool pool1</cache-admin-command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Found 3 entries</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>/foo 0 0 0 0</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>/bar 0 0 0 0</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>/baz 0 0 0 0</expected-output>
</comparator>
</comparators>
</test>
</tests>
</configuration>