HDFS-5451. Add byte and file statistics to PathBasedCacheEntry. Contributed by Colin Patrick McCabe.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1543958 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Andrew Wang 2013-11-20 21:31:41 +00:00
parent 8313697752
commit 916ab9286b
13 changed files with 317 additions and 43 deletions

View File

@ -204,6 +204,9 @@ Trunk (Unreleased)
HDFS-5525. Inline dust templates for new Web UI. (Haohui Mai via jing9)
HDFS-5451. Add byte and file statistics to PathBasedCacheEntry.
(Colin Patrick McCabe via Andrew Wang)
OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)

View File

@ -37,6 +37,9 @@ public static class Builder {
private Path path;
private Short replication;
private String pool;
private Long bytesNeeded;
private Long bytesCached;
private Long filesAffected;
/**
* Builds a new PathBasedCacheDirective populated with the set properties.
@ -44,7 +47,8 @@ public static class Builder {
* @return New PathBasedCacheDirective.
*/
public PathBasedCacheDirective build() {
return new PathBasedCacheDirective(id, path, replication, pool);
return new PathBasedCacheDirective(id, path, replication, pool,
bytesNeeded, bytesCached, filesAffected);
}
/**
@ -62,6 +66,9 @@ public Builder(PathBasedCacheDirective directive) {
this.path = directive.getPath();
this.replication = directive.getReplication();
this.pool = directive.getPool();
this.bytesNeeded = directive.bytesNeeded;
this.bytesCached = directive.bytesCached;
this.filesAffected = directive.filesAffected;
}
/**
@ -97,6 +104,39 @@ public Builder setReplication(Short replication) {
return this;
}
/**
* Sets the bytes needed by this directive.
* <p>
* This is a statistic populated by the NameNode when listing directives
* (see PathBasedCacheEntry#toDirective); clients normally leave it unset
* (null) when creating a new directive.
*
* @param bytesNeeded The bytes needed, or null if not known.
* @return This builder, for call chaining.
*/
public Builder setBytesNeeded(Long bytesNeeded) {
this.bytesNeeded = bytesNeeded;
return this;
}
/**
* Sets the bytes cached by this directive.
* <p>
* This is a statistic populated by the NameNode when listing directives
* (see PathBasedCacheEntry#toDirective); clients normally leave it unset
* (null) when creating a new directive.
*
* @param bytesCached The bytes cached, or null if not known.
* @return This builder, for call chaining.
*/
public Builder setBytesCached(Long bytesCached) {
this.bytesCached = bytesCached;
return this;
}
/**
* Sets the files affected by this directive.
* <p>
* This is a statistic populated by the NameNode when listing directives
* (see PathBasedCacheEntry#toDirective); clients normally leave it unset
* (null) when creating a new directive.
*
* @param filesAffected The files affected, or null if not known.
* @return This builder, for call chaining.
*/
public Builder setFilesAffected(Long filesAffected) {
this.filesAffected = filesAffected;
return this;
}
/**
* Sets the pool used in this request.
*
@ -113,12 +153,19 @@ public Builder setPool(String pool) {
private final Path path;
private final Short replication;
private final String pool;
private final Long bytesNeeded;
private final Long bytesCached;
private final Long filesAffected;
PathBasedCacheDirective(Long id, Path path, Short replication, String pool) {
PathBasedCacheDirective(Long id, Path path, Short replication, String pool,
Long bytesNeeded, Long bytesCached, Long filesAffected) {
this.id = id;
this.path = path;
this.replication = replication;
this.pool = pool;
this.bytesNeeded = bytesNeeded;
this.bytesCached = bytesCached;
this.filesAffected = filesAffected;
}
/**
@ -148,6 +195,27 @@ public Short getReplication() {
/**
* @return The name of the cache pool this directive belongs to.
*/
public String getPool() {
return pool;
}
/**
* @return The bytes needed to cache everything covered by this directive,
* or null if this statistic has not been set.
*/
public Long getBytesNeeded() {
return bytesNeeded;
}
/**
* @return The bytes currently cached for this directive, or null if this
* statistic has not been set.
*/
public Long getBytesCached() {
return bytesCached;
}
/**
* @return The number of files affected by this directive, or null if this
* statistic has not been set.
*/
public Long getFilesAffected() {
return filesAffected;
}
@Override
public boolean equals(Object o) {
@ -195,6 +263,18 @@ public String toString() {
builder.append(prefix).append("pool: ").append(pool);
prefix = ",";
}
if (bytesNeeded != null) {
builder.append(prefix).append("bytesNeeded: ").append(bytesNeeded);
prefix = ",";
}
if (bytesCached != null) {
builder.append(prefix).append("bytesCached: ").append(bytesCached);
prefix = ",";
}
if (filesAffected != null) {
builder.append(prefix).append("filesAffected: ").append(filesAffected);
prefix = ",";
}
builder.append("}");
return builder.toString();
}

View File

@ -35,6 +35,9 @@ public final class PathBasedCacheEntry {
private final String path;
private final short replication;
private final CachePool pool;
private long bytesNeeded;
private long bytesCached;
private long filesAffected;
public PathBasedCacheEntry(long entryId, String path,
short replication, CachePool pool) {
@ -46,6 +49,9 @@ public PathBasedCacheEntry(long entryId, String path,
this.replication = replication;
Preconditions.checkNotNull(path);
this.pool = pool;
this.bytesNeeded = 0;
this.bytesCached = 0;
this.filesAffected = 0;
}
public long getEntryId() {
@ -70,6 +76,9 @@ public PathBasedCacheDirective toDirective() {
setPath(new Path(path)).
setReplication(replication).
setPool(pool.getPoolName()).
setBytesNeeded(bytesNeeded).
setBytesCached(bytesCached).
setFilesAffected(filesAffected).
build();
}
@ -80,6 +89,9 @@ public String toString() {
append(", path:").append(path).
append(", replication:").append(replication).
append(", pool:").append(pool).
append(", bytesNeeded:").append(bytesNeeded).
append(", bytesCached:").append(bytesCached).
append(", filesAffected:").append(filesAffected).
append(" }");
return builder.toString();
}
@ -99,4 +111,40 @@ public boolean equals(Object o) {
public int hashCode() {
return new HashCodeBuilder().append(entryId).toHashCode();
}
/**
* @return Bytes needed to fully cache this directive, as of the last
* CacheReplicationMonitor rescan.
*/
public long getBytesNeeded() {
return bytesNeeded;
}
/**
* Resets the bytes-needed statistic to zero; called at the start of each
* rescan before the totals are re-accumulated.
*/
public void clearBytesNeeded() {
this.bytesNeeded = 0;
}
/**
* Adds to the bytes-needed statistic.
*
* @param toAdd Number of bytes to add.
*/
public void addBytesNeeded(long toAdd) {
this.bytesNeeded += toAdd;
}
/**
* @return Bytes currently cached for this directive, as of the last
* CacheReplicationMonitor rescan.
*/
public long getBytesCached() {
return bytesCached;
}
/**
* Resets the bytes-cached statistic to zero; called at the start of each
* rescan before the totals are re-accumulated.
*/
public void clearBytesCached() {
this.bytesCached = 0;
}
/**
* Adds to the bytes-cached statistic.
*
* @param toAdd Number of bytes to add.
*/
public void addBytesCached(long toAdd) {
this.bytesCached += toAdd;
}
/**
* @return Number of files covered by this directive, as of the last
* CacheReplicationMonitor rescan.
*/
public long getFilesAffected() {
return filesAffected;
}
/**
* Resets the files-affected statistic to zero; called at the start of
* each rescan before the totals are re-accumulated.
*/
public void clearFilesAffected() {
this.filesAffected = 0;
}
/**
* Increments the files-affected statistic by one; called once per file
* scanned under this directive.
*/
public void incrementFilesAffected() {
this.filesAffected++;
}
};

View File

@ -1583,6 +1583,15 @@ public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
if (directive.getPool() != null) {
builder.setPool(directive.getPool());
}
if (directive.getBytesNeeded() != null) {
builder.setBytesNeeded(directive.getBytesNeeded());
}
if (directive.getBytesCached() != null) {
builder.setBytesCached(directive.getBytesCached());
}
if (directive.getFilesAffected() != null) {
builder.setFilesAffected(directive.getFilesAffected());
}
return builder.build();
}
@ -1603,6 +1612,15 @@ public static DataChecksum.Type convert(HdfsProtos.ChecksumTypeProto type) {
if (proto.hasPool()) {
builder.setPool(proto.getPool());
}
if (proto.hasBytesNeeded()) {
builder.setBytesNeeded(proto.getBytesNeeded());
}
if (proto.hasBytesCached()) {
builder.setBytesCached(proto.getBytesCached());
}
if (proto.hasFilesAffected()) {
builder.setFilesAffected(proto.getFilesAffected());
}
return builder.build();
}

View File

@ -198,11 +198,6 @@ private void rescan() {
namesystem.writeLock();
try {
rescanPathBasedCacheEntries();
} finally {
namesystem.writeUnlock();
}
namesystem.writeLock();
try {
rescanCachedBlockMap();
blockManager.getDatanodeManager().resetLastCachingDirectiveSentTime();
} finally {
@ -220,6 +215,9 @@ private void rescanPathBasedCacheEntries() {
FSDirectory fsDir = namesystem.getFSDirectory();
for (PathBasedCacheEntry pce : cacheManager.getEntriesById().values()) {
scannedDirectives++;
pce.clearBytesNeeded();
pce.clearBytesCached();
pce.clearFilesAffected();
String path = pce.getPath();
INode node;
try {
@ -258,12 +256,18 @@ private void rescanPathBasedCacheEntries() {
* @param file The file.
*/
private void rescanFile(PathBasedCacheEntry pce, INodeFile file) {
pce.incrementFilesAffected();
BlockInfo[] blockInfos = file.getBlocks();
long cachedTotal = 0;
long neededTotal = 0;
for (BlockInfo blockInfo : blockInfos) {
if (!blockInfo.getBlockUCState().equals(BlockUCState.COMPLETE)) {
// We don't try to cache blocks that are under construction.
continue;
}
long neededByBlock =
pce.getReplication() * blockInfo.getNumBytes();
neededTotal += neededByBlock;
Block block = new Block(blockInfo.getBlockId());
CachedBlock ncblock = new CachedBlock(block.getBlockId(),
pce.getReplication(), mark);
@ -271,6 +275,18 @@ private void rescanFile(PathBasedCacheEntry pce, INodeFile file) {
if (ocblock == null) {
cachedBlocks.put(ncblock);
} else {
// Update the cached-bytes total using the current replication levels.
// Assumption: every cached replica of a block has the same length on
// each datanode. We can assume this because we only cache blocks that
// are in state COMPLETE (blocks under construction are skipped above).
// Note that if two directives are caching the same block(s), they will
// both get them added to their bytesCached.
List<DatanodeDescriptor> cachedOn =
ocblock.getDatanodes(Type.CACHED);
long cachedByBlock = Math.min(cachedOn.size(), pce.getReplication()) *
blockInfo.getNumBytes();
cachedTotal += cachedByBlock;
if (mark != ocblock.getMark()) {
// Mark hasn't been set in this scan, so update replication and mark.
ocblock.setReplicationAndMark(pce.getReplication(), mark);
@ -282,6 +298,12 @@ private void rescanFile(PathBasedCacheEntry pce, INodeFile file) {
}
}
}
pce.addBytesNeeded(neededTotal);
pce.addBytesCached(cachedTotal);
if (LOG.isTraceEnabled()) {
LOG.debug("Directive " + pce.getEntryId() + " is caching " +
file.getFullPathName() + ": " + cachedTotal + "/" + neededTotal);
}
}
/**

View File

@ -525,6 +525,21 @@ boolean processCommandFromActor(DatanodeCommand cmd,
}
}
/**
* Renders an array of block IDs as a comma-separated string, truncating
* the output with "..." once the configured maximum number of blocks to
* log has been reached.
*
* @param ids The block IDs to render.
* @return The comma-separated (possibly truncated) string.
*/
private String blockIdArrayToString(long ids[]) {
  final long limit = dn.getMaxNumberOfBlocksToLog();
  StringBuilder sb = new StringBuilder();
  for (int i = 0; i < ids.length; i++) {
    if (i >= limit) {
      // Too many IDs to log; elide the rest.
      sb.append("...");
      break;
    }
    if (i > 0) {
      sb.append(", ");
    }
    sb.append(ids[i]);
  }
  return sb.toString();
}
/**
* This method should handle all commands from Active namenode except
* DNA_REGISTER which should be handled earlier itself.
@ -565,12 +580,16 @@ private boolean processCommandFromActive(DatanodeCommand cmd,
dn.metrics.incrBlocksRemoved(toDelete.length);
break;
case DatanodeProtocol.DNA_CACHE:
LOG.info("DatanodeCommand action: DNA_CACHE");
LOG.info("DatanodeCommand action: DNA_CACHE for " +
blockIdCmd.getBlockPoolId() + " of [" +
blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
dn.getFSDataset().cache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
dn.metrics.incrBlocksCached(blockIdCmd.getBlockIds().length);
break;
case DatanodeProtocol.DNA_UNCACHE:
LOG.info("DatanodeCommand action: DNA_UNCACHE");
LOG.info("DatanodeCommand action: DNA_UNCACHE for " +
blockIdCmd.getBlockPoolId() + " of [" +
blockIdArrayToString(blockIdCmd.getBlockIds()) + "]");
dn.getFSDataset().uncache(blockIdCmd.getBlockPoolId(), blockIdCmd.getBlockIds());
dn.metrics.incrBlocksUncached(blockIdCmd.getBlockIds().length);
break;

View File

@ -459,7 +459,7 @@ DatanodeCommand cacheReport() throws IOException {
long sendCost = sendTime - createTime;
dn.getMetrics().addCacheReport(sendCost);
LOG.info("CacheReport of " + blockIds.size()
+ " blocks took " + createCost + " msec to generate and "
+ " block(s) took " + createCost + " msec to generate and "
+ sendCost + " msecs for RPC and NN processing");
}
return cmd;

View File

@ -207,6 +207,7 @@ public static InetSocketAddress createSocketAddr(String target) {
private SecureResources secureResources = null;
private AbstractList<File> dataDirs;
private Configuration conf;
private final long maxNumberOfBlocksToLog;
private final List<String> usersWithLocalPathAccess;
private boolean connectToDnViaHostname;
@ -231,6 +232,8 @@ public static InetSocketAddress createSocketAddr(String target) {
final AbstractList<File> dataDirs,
final SecureResources resources) throws IOException {
super(conf);
this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);
this.usersWithLocalPathAccess = Arrays.asList(
conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
@ -1031,6 +1034,10 @@ private void checkBlockLocalPathAccess() throws IOException {
}
}
/**
* @return The maximum number of block IDs to include in a single log
* message, as configured by DFS_MAX_NUM_BLOCKS_TO_LOG_KEY.
*/
public long getMaxNumberOfBlocksToLog() {
return maxNumberOfBlocksToLog;
}
@Override
public BlockLocalPathInfo getBlockLocalPathInfo(ExtendedBlock block,
Token<BlockTokenIdentifier> token) throws IOException {

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.tools;
import java.io.IOException;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.List;
@ -394,7 +395,7 @@ public String getName() {
@Override
public String getShortUsage() {
return "[" + getName() + " [-path <path>] [-pool <pool>]]\n";
return "[" + getName() + " [-stats] [-path <path>] [-pool <pool>]]\n";
}
@Override
@ -406,6 +407,7 @@ public String getLongUsage() {
"in a cache pool that we don't have read access for, it " +
"will not be listed.");
listing.addRow("<pool>", "List only path cache directives in that pool.");
listing.addRow("-stats", "List path-based cache directive statistics.");
return getShortUsage() + "\n" +
"List PathBasedCache directives.\n\n" +
listing.toString();
@ -423,28 +425,40 @@ public int run(Configuration conf, List<String> args) throws IOException {
if (poolFilter != null) {
builder.setPool(poolFilter);
}
boolean printStats = StringUtils.popOption("-stats", args);
if (!args.isEmpty()) {
System.err.println("Can't understand argument: " + args.get(0));
return 1;
}
TableListing tableListing = new TableListing.Builder().
addField("ID", Justification.LEFT).
TableListing.Builder tableBuilder = new TableListing.Builder().
addField("ID", Justification.RIGHT).
addField("POOL", Justification.LEFT).
addField("REPLICATION", Justification.LEFT).
addField("PATH", Justification.LEFT).
build();
addField("REPLICATION", Justification.RIGHT).
addField("PATH", Justification.LEFT);
if (printStats) {
tableBuilder.addField("NEEDED", Justification.RIGHT).
addField("CACHED", Justification.RIGHT).
addField("FILES", Justification.RIGHT);
}
TableListing tableListing = tableBuilder.build();
DistributedFileSystem dfs = getDFS(conf);
RemoteIterator<PathBasedCacheDirective> iter =
dfs.listPathBasedCacheDirectives(builder.build());
int numEntries = 0;
while (iter.hasNext()) {
PathBasedCacheDirective directive = iter.next();
String row[] = new String[] {
"" + directive.getId(), directive.getPool(),
"" + directive.getReplication(),
directive.getPath().toUri().getPath(),
};
tableListing.addRow(row);
List<String> row = new LinkedList<String>();
row.add("" + directive.getId());
row.add(directive.getPool());
row.add("" + directive.getReplication());
row.add(directive.getPath().toUri().getPath());
if (printStats) {
row.add("" + directive.getBytesNeeded());
row.add("" + directive.getBytesCached());
row.add("" + directive.getFilesAffected());
}
tableListing.addRow(row.toArray(new String[0]));
numEntries++;
}
System.out.print(String.format("Found %d entr%s\n",
@ -734,7 +748,7 @@ public int run(Configuration conf, List<String> args) throws IOException {
addField("OWNER", Justification.LEFT).
addField("GROUP", Justification.LEFT).
addField("MODE", Justification.LEFT).
addField("WEIGHT", Justification.LEFT).
addField("WEIGHT", Justification.RIGHT).
build();
int numResults = 0;
try {

View File

@ -30,9 +30,9 @@
* Example:
*
* NAME OWNER GROUP MODE WEIGHT
* pool1 andrew andrew rwxr-xr-x 100
* pool2 andrew andrew rwxr-xr-x 100
* pool3 andrew andrew rwxr-xr-x 100
* pool1 andrew andrew rwxr-xr-x 100
* pool2 andrew andrew rwxr-xr-x 100
* pool3 andrew andrew rwxr-xr-x 100
*
*/
@InterfaceAudience.Private

View File

@ -368,6 +368,9 @@ message PathBasedCacheDirectiveInfoProto {
optional string path = 2;
optional uint32 replication = 3;
optional string pool = 4;
optional int64 bytesNeeded = 5;
optional int64 bytesCached = 6;
optional int64 filesAffected = 7;
}
message AddPathBasedCacheDirectiveRequestProto {

View File

@ -56,6 +56,7 @@
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathBasedCacheDirective;
import org.apache.hadoop.hdfs.server.blockmanagement.CacheReplicationMonitor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@ -66,7 +67,10 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.GSet;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -100,6 +104,7 @@ public void setup() throws Exception {
proto = cluster.getNameNodeRpc();
prevCacheManipulator = NativeIO.POSIX.getCacheManipulator();
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
LogManager.getLogger(CacheReplicationMonitor.class).setLevel(Level.TRACE);
}
@After
@ -796,10 +801,65 @@ public void testWaitForCachedReplicasInDirectory() throws Exception {
build());
waitForCachedBlocks(namenode, 4, 8,
"testWaitForCachedReplicasInDirectory:1");
// Verify that listDirectives gives the stats we want.
RemoteIterator<PathBasedCacheDirective> iter =
dfs.listPathBasedCacheDirectives(new PathBasedCacheDirective.Builder().
setPath(new Path("/foo")).
build());
PathBasedCacheDirective directive = iter.next();
Assert.assertEquals(Long.valueOf(2),
directive.getFilesAffected());
Assert.assertEquals(Long.valueOf(
2 * numBlocksPerFile * BLOCK_SIZE * 2),
directive.getBytesNeeded());
Assert.assertEquals(Long.valueOf(
2 * numBlocksPerFile * BLOCK_SIZE * 2),
directive.getBytesCached());
long id2 = dfs.addPathBasedCacheDirective(
new PathBasedCacheDirective.Builder().
setPath(new Path("/foo/bar")).
setReplication((short)4).
setPool(pool).
build());
// wait for an additional 2 cached replicas to come up
waitForCachedBlocks(namenode, 4, 10,
"testWaitForCachedReplicasInDirectory:2");
// the directory directive's stats are unchanged
iter = dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().
setPath(new Path("/foo")).
build());
directive = iter.next();
Assert.assertEquals(Long.valueOf(2),
directive.getFilesAffected());
Assert.assertEquals(Long.valueOf(
2 * numBlocksPerFile * BLOCK_SIZE * 2),
directive.getBytesNeeded());
Assert.assertEquals(Long.valueOf(
2 * numBlocksPerFile * BLOCK_SIZE * 2),
directive.getBytesCached());
// verify /foo/bar's stats
iter = dfs.listPathBasedCacheDirectives(
new PathBasedCacheDirective.Builder().
setPath(new Path("/foo/bar")).
build());
directive = iter.next();
Assert.assertEquals(Long.valueOf(1),
directive.getFilesAffected());
Assert.assertEquals(Long.valueOf(
4 * numBlocksPerFile * BLOCK_SIZE),
directive.getBytesNeeded());
// only 3 because the file only has 3 replicas, not 4 as requested.
Assert.assertEquals(Long.valueOf(
3 * numBlocksPerFile * BLOCK_SIZE),
directive.getBytesCached());
// remove and watch numCached go to 0
dfs.removePathBasedCacheDirective(id);
dfs.removePathBasedCacheDirective(id2);
waitForCachedBlocks(namenode, 0, 0,
"testWaitForCachedReplicasInDirectory:2");
"testWaitForCachedReplicasInDirectory:3");
} finally {
cluster.shutdown();
}

View File

@ -90,7 +90,7 @@
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>poolparty bob bobgroup rwxrwxrwx 51</expected-output>
<expected-output>poolparty bob bobgroup rwxrwxrwx 51</expected-output>
</comparator>
</comparators>
</test>
@ -129,11 +129,11 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>bar alice alicegroup rwxr-xr-x 100 </expected-output>
<expected-output>bar alice alicegroup rwxr-xr-x 100</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>foo bob bob rw-rw-r-- 100 </expected-output>
<expected-output>foo bob bob rw-rw-r-- 100</expected-output>
</comparator>
</comparators>
</test>
@ -156,7 +156,7 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>foo bob bob rw-rw-r-- 100 </expected-output>
<expected-output>foo bob bob rw-rw-r-- 100</expected-output>
</comparator>
</comparators>
</test>
@ -180,15 +180,15 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>1 pool1 1 /foo</expected-output>
<expected-output> 1 pool1 1 /foo</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>2 pool1 1 /bar</expected-output>
<expected-output> 2 pool1 1 /bar</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>3 pool1 2 /baz</expected-output>
<expected-output> 3 pool1 2 /baz</expected-output>
</comparator>
</comparators>
</test>
@ -234,11 +234,11 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>8 pool2 1 /baz</expected-output>
<expected-output> 8 pool2 1 /baz</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>9 pool2 1 /buz</expected-output>
<expected-output> 9 pool2 1 /buz</expected-output>
</comparator>
</comparators>
</test>
@ -265,11 +265,11 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>10 pool1 1 /foo</expected-output>
<expected-output> 10 pool1 1 /foo</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>12 pool2 1 /foo</expected-output>
<expected-output> 12 pool2 1 /foo</expected-output>
</comparator>
</comparators>
</test>
@ -296,7 +296,7 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>16 pool2 1 /foo</expected-output>
<expected-output> 16 pool2 1 /foo</expected-output>
</comparator>
</comparators>
</test>
@ -320,7 +320,7 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>19 pool1 1 /bar</expected-output>
<expected-output> 19 pool1 1 /bar</expected-output>
</comparator>
</comparators>
</test>
@ -349,11 +349,11 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>22 pool1 1 /bar</expected-output>
<expected-output> 22 pool1 1 /bar</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>24 pool2 1 /bar</expected-output>
<expected-output> 24 pool2 1 /bar</expected-output>
</comparator>
</comparators>
</test>
@ -379,7 +379,7 @@
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>25 pool1 1 /bar3</expected-output>
<expected-output> 25 pool1 1 /bar3</expected-output>
</comparator>
</comparators>
</test>