HDFS-5636. Enforce a max TTL per cache pool (awang via cmccabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1552841 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Colin McCabe 2013-12-20 23:27:20 +00:00
parent 86e5db39ba
commit b9ae3087c0
17 changed files with 749 additions and 217 deletions

View File

@ -241,6 +241,8 @@ Trunk (Unreleased)
HDFS-5431. Support cachepool-based limit management in path-based caching HDFS-5431. Support cachepool-based limit management in path-based caching
(awang via cmccabe) (awang via cmccabe)
HDFS-5636. Enforce a max TTL per cache pool. (awang via cmccabe)
OPTIMIZATIONS OPTIMIZATIONS
HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe) HDFS-5349. DNA_CACHE and DNA_UNCACHE should be by blockId only. (cmccabe)

View File

@ -1546,7 +1546,11 @@ public class DFSUtil {
* Converts a time duration in milliseconds into DDD:HH:MM:SS format. * Converts a time duration in milliseconds into DDD:HH:MM:SS format.
*/ */
public static String durationToString(long durationMs) { public static String durationToString(long durationMs) {
Preconditions.checkArgument(durationMs >= 0, "Invalid negative duration"); boolean negative = false;
if (durationMs < 0) {
negative = true;
durationMs = -durationMs;
}
// Chop off the milliseconds // Chop off the milliseconds
long durationSec = durationMs / 1000; long durationSec = durationMs / 1000;
final int secondsPerMinute = 60; final int secondsPerMinute = 60;
@ -1559,7 +1563,12 @@ public class DFSUtil {
final long minutes = durationSec / secondsPerMinute; final long minutes = durationSec / secondsPerMinute;
durationSec -= minutes * secondsPerMinute; durationSec -= minutes * secondsPerMinute;
final long seconds = durationSec; final long seconds = durationSec;
return String.format("%03d:%02d:%02d:%02d", days, hours, minutes, seconds); final long milliseconds = durationMs % 1000;
String format = "%03d:%02d:%02d:%02d.%03d";
if (negative) {
format = "-" + format;
}
return String.format(format, days, hours, minutes, seconds, milliseconds);
} }
/** /**
@ -1571,9 +1580,9 @@ public class DFSUtil {
+ ": too short"); + ": too short");
} }
String ttlString = relTime.substring(0, relTime.length()-1); String ttlString = relTime.substring(0, relTime.length()-1);
int ttl; long ttl;
try { try {
ttl = Integer.parseInt(ttlString); ttl = Long.parseLong(ttlString);
} catch (NumberFormatException e) { } catch (NumberFormatException e) {
throw new IOException("Unable to parse relative time value of " + relTime throw new IOException("Unable to parse relative time value of " + relTime
+ ": " + ttlString + " is not a number"); + ": " + ttlString + " is not a number");

View File

@ -52,6 +52,14 @@ public final class CacheDirective implements IntrusiveCollection.Element {
private Element prev; private Element prev;
private Element next; private Element next;
/**
 * Builds a CacheDirective from an already-populated CacheDirectiveInfo.
 * <p>
 * Assumes id, path, replication, and expiration are all set (non-null) on
 * the info; a NullPointerException results otherwise — TODO confirm all
 * callers guarantee this.
 */
public CacheDirective(CacheDirectiveInfo info) {
this(
info.getId(),
info.getPath().toUri().getPath(),
info.getReplication(),
info.getExpiration().getAbsoluteMillis());
}
public CacheDirective(long id, String path, public CacheDirective(long id, String path,
short replication, long expiryTime) { short replication, long expiryTime) {
Preconditions.checkArgument(id > 0); Preconditions.checkArgument(id > 0);

View File

@ -26,6 +26,8 @@ import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import com.google.common.base.Preconditions;
/** /**
* Describes a path-based cache directive. * Describes a path-based cache directive.
*/ */
@ -138,11 +140,22 @@ public class CacheDirectiveInfo {
*/ */
public static class Expiration { public static class Expiration {
/** Denotes a CacheDirectiveInfo that never expires **/ /**
public static final int EXPIRY_NEVER = -1; * The maximum value we accept for a relative expiry.
*/
public static final long MAX_RELATIVE_EXPIRY_MS =
Long.MAX_VALUE / 4; // This helps prevent weird overflow bugs
/**
* A relative Expiration that never expires.
*/
public static final Expiration NEVER = newRelative(MAX_RELATIVE_EXPIRY_MS);
/** /**
* Create a new relative Expiration. * Create a new relative Expiration.
* <p>
* Use {@link Expiration#NEVER} to indicate an Expiration that never
* expires.
* *
* @param ms how long until the CacheDirective expires, in milliseconds * @param ms how long until the CacheDirective expires, in milliseconds
* @return A relative Expiration * @return A relative Expiration
@ -153,6 +166,9 @@ public class CacheDirectiveInfo {
/** /**
* Create a new absolute Expiration. * Create a new absolute Expiration.
* <p>
* Use {@link Expiration#NEVER} to indicate an Expiration that never
* expires.
* *
* @param date when the CacheDirective expires * @param date when the CacheDirective expires
* @return An absolute Expiration * @return An absolute Expiration
@ -163,6 +179,9 @@ public class CacheDirectiveInfo {
/** /**
* Create a new absolute Expiration. * Create a new absolute Expiration.
* <p>
* Use {@link Expiration#NEVER} to indicate an Expiration that never
* expires.
* *
* @param ms when the CacheDirective expires, in milliseconds since the Unix * @param ms when the CacheDirective expires, in milliseconds since the Unix
* epoch. * epoch.
@ -176,6 +195,10 @@ public class CacheDirectiveInfo {
private final boolean isRelative; private final boolean isRelative;
private Expiration(long ms, boolean isRelative) { private Expiration(long ms, boolean isRelative) {
if (isRelative) {
Preconditions.checkArgument(ms <= MAX_RELATIVE_EXPIRY_MS,
"Expiration time is too far in the future!");
}
this.ms = ms; this.ms = ms;
this.isRelative = isRelative; this.isRelative = isRelative;
} }

View File

@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.InvalidRequestException; import org.apache.hadoop.fs.InvalidRequestException;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
/** /**
* CachePoolInfo describes a cache pool. * CachePoolInfo describes a cache pool.
@ -42,6 +43,20 @@ import org.apache.hadoop.fs.permission.FsPermission;
public class CachePoolInfo { public class CachePoolInfo {
public static final Log LOG = LogFactory.getLog(CachePoolInfo.class); public static final Log LOG = LogFactory.getLog(CachePoolInfo.class);
/**
* Indicates that the pool does not have a maximum relative expiry.
*/
public static final long RELATIVE_EXPIRY_NEVER =
Expiration.MAX_RELATIVE_EXPIRY_MS;
/**
* Default max relative expiry for cache pools.
*/
public static final long DEFAULT_MAX_RELATIVE_EXPIRY =
RELATIVE_EXPIRY_NEVER;
public static final long LIMIT_UNLIMITED = Long.MAX_VALUE;
public static final long DEFAULT_LIMIT = LIMIT_UNLIMITED;
final String poolName; final String poolName;
@Nullable @Nullable
@ -56,14 +71,24 @@ public class CachePoolInfo {
@Nullable @Nullable
Long limit; Long limit;
@Nullable
Long maxRelativeExpiryMs;
public CachePoolInfo(String poolName) { public CachePoolInfo(String poolName) {
this.poolName = poolName; this.poolName = poolName;
} }
/**
* @return Name of the pool.
*/
public String getPoolName() { public String getPoolName() {
return poolName; return poolName;
} }
/**
* @return The owner of the pool. Along with the group and mode, determines
* who has access to view and modify the pool.
*/
public String getOwnerName() { public String getOwnerName() {
return ownerName; return ownerName;
} }
@ -73,6 +98,10 @@ public class CachePoolInfo {
return this; return this;
} }
/**
* @return The group of the pool. Along with the owner and mode, determines
* who has access to view and modify the pool.
*/
public String getGroupName() { public String getGroupName() {
return groupName; return groupName;
} }
@ -82,6 +111,10 @@ public class CachePoolInfo {
return this; return this;
} }
/**
* @return Unix-style permissions of the pool. Along with the owner and group,
* determines who has access to view and modify the pool.
*/
public FsPermission getMode() { public FsPermission getMode() {
return mode; return mode;
} }
@ -91,6 +124,10 @@ public class CachePoolInfo {
return this; return this;
} }
/**
* @return The maximum aggregate number of bytes that can be cached by
* directives in this pool.
*/
public Long getLimit() { public Long getLimit() {
return limit; return limit;
} }
@ -100,6 +137,26 @@ public class CachePoolInfo {
return this; return this;
} }
/**
 * @return The maximum relative expiration of directives of this pool in
 *         milliseconds. May be {@code null} when the field has not been set
 *         (the backing field is {@code @Nullable}).
 */
public Long getMaxRelativeExpiryMs() {
return maxRelativeExpiryMs;
}
/**
 * Set the maximum relative expiration of directives of this pool in
 * milliseconds.
 *
 * @param ms the maximum relative expiration, in milliseconds
 * @return This builder, for call chaining.
 */
public CachePoolInfo setMaxRelativeExpiryMs(Long ms) {
this.maxRelativeExpiryMs = ms;
return this;
}
public String toString() { public String toString() {
return new StringBuilder().append("{"). return new StringBuilder().append("{").
append("poolName:").append(poolName). append("poolName:").append(poolName).
@ -108,6 +165,7 @@ public class CachePoolInfo {
append(", mode:").append((mode == null) ? "null" : append(", mode:").append((mode == null) ? "null" :
String.format("0%03o", mode.toShort())). String.format("0%03o", mode.toShort())).
append(", limit:").append(limit). append(", limit:").append(limit).
append(", maxRelativeExpiryMs:").append(maxRelativeExpiryMs).
append("}").toString(); append("}").toString();
} }
@ -125,6 +183,7 @@ public class CachePoolInfo {
append(groupName, other.groupName). append(groupName, other.groupName).
append(mode, other.mode). append(mode, other.mode).
append(limit, other.limit). append(limit, other.limit).
append(maxRelativeExpiryMs, other.maxRelativeExpiryMs).
isEquals(); isEquals();
} }
@ -136,6 +195,7 @@ public class CachePoolInfo {
append(groupName). append(groupName).
append(mode). append(mode).
append(limit). append(limit).
append(maxRelativeExpiryMs).
hashCode(); hashCode();
} }
@ -146,6 +206,15 @@ public class CachePoolInfo {
if ((info.getLimit() != null) && (info.getLimit() < 0)) { if ((info.getLimit() != null) && (info.getLimit() < 0)) {
throw new InvalidRequestException("Limit is negative."); throw new InvalidRequestException("Limit is negative.");
} }
if (info.getMaxRelativeExpiryMs() != null) {
long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
if (maxRelativeExpiryMs < 0l) {
throw new InvalidRequestException("Max relative expiry is negative.");
}
if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
throw new InvalidRequestException("Max relative expiry is too big.");
}
}
validateName(info.poolName); validateName(info.poolName);
} }

View File

@ -1816,6 +1816,9 @@ public class PBHelper {
if (info.getLimit() != null) { if (info.getLimit() != null) {
builder.setLimit(info.getLimit()); builder.setLimit(info.getLimit());
} }
if (info.getMaxRelativeExpiryMs() != null) {
builder.setMaxRelativeExpiry(info.getMaxRelativeExpiryMs());
}
return builder.build(); return builder.build();
} }
@ -1835,6 +1838,9 @@ public class PBHelper {
if (proto.hasLimit()) { if (proto.hasLimit()) {
info.setLimit(proto.getLimit()); info.setLimit(proto.getLimit());
} }
if (proto.hasMaxRelativeExpiry()) {
info.setMaxRelativeExpiryMs(proto.getMaxRelativeExpiry());
}
return info; return info;
} }

View File

@ -365,7 +365,7 @@ public class CacheReplicationMonitor extends Thread implements Closeable {
if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) { if (directive.getExpiryTime() > 0 && directive.getExpiryTime() <= now) {
if (LOG.isDebugEnabled()) { if (LOG.isDebugEnabled()) {
LOG.debug("Skipping directive id " + directive.getId() LOG.debug("Skipping directive id " + directive.getId()
+ " because it has expired (" + directive.getExpiryTime() + ">=" + " because it has expired (" + directive.getExpiryTime() + "<="
+ now + ")"); + now + ")");
} }
continue; continue;

View File

@ -32,6 +32,7 @@ import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Collection; import java.util.Collection;
import java.util.Collections; import java.util.Collections;
import java.util.Date;
import java.util.EnumSet; import java.util.EnumSet;
import java.util.Iterator; import java.util.Iterator;
import java.util.LinkedList; import java.util.LinkedList;
@ -55,6 +56,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CacheDirective; import org.apache.hadoop.hdfs.protocol.CacheDirective;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats; import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@ -322,27 +324,48 @@ public final class CacheManager {
* {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration * {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
* into an absolute time based on the local clock. * into an absolute time based on the local clock.
* *
* @param directive from which to get the expiry time * @param info to validate.
* @param defaultValue to use if Expiration is not set * @param maxRelativeExpiryTime of the info's pool.
* @return Absolute expiry time in milliseconds since Unix epoch * @return the expiration time, or the pool's max absolute expiration if the
* @throws InvalidRequestException if the Expiration is invalid * info's expiration was not set.
* @throws InvalidRequestException if the info's Expiration is invalid.
*/ */
private static long validateExpiryTime(CacheDirectiveInfo directive, private static long validateExpiryTime(CacheDirectiveInfo info,
long defaultValue) throws InvalidRequestException { long maxRelativeExpiryTime) throws InvalidRequestException {
long expiryTime; if (LOG.isTraceEnabled()) {
CacheDirectiveInfo.Expiration expiration = directive.getExpiration(); LOG.trace("Validating directive " + info
if (expiration != null) { + " pool maxRelativeExpiryTime " + maxRelativeExpiryTime);
if (expiration.getMillis() < 0) { }
final long now = new Date().getTime();
final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
if (info == null || info.getExpiration() == null) {
return maxAbsoluteExpiryTime;
}
Expiration expiry = info.getExpiration();
if (expiry.getMillis() < 0l) {
throw new InvalidRequestException("Cannot set a negative expiration: " throw new InvalidRequestException("Cannot set a negative expiration: "
+ expiration.getMillis()); + expiry.getMillis());
} }
// Converts a relative duration into an absolute time based on the local long relExpiryTime, absExpiryTime;
// clock if (expiry.isRelative()) {
expiryTime = expiration.getAbsoluteMillis(); relExpiryTime = expiry.getMillis();
absExpiryTime = now + relExpiryTime;
} else { } else {
expiryTime = defaultValue; absExpiryTime = expiry.getMillis();
relExpiryTime = absExpiryTime - now;
} }
return expiryTime; // Need to cap the expiry so we don't overflow a long when doing math
if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
throw new InvalidRequestException("Expiration "
+ expiry.toString() + " is too far in the future!");
}
// Fail if the requested expiry is greater than the max
if (relExpiryTime > maxRelativeExpiryTime) {
throw new InvalidRequestException("Expiration " + expiry.toString()
+ " exceeds the max relative expiration time of "
+ maxRelativeExpiryTime + " ms.");
}
return absExpiryTime;
} }
/** /**
@ -357,6 +380,9 @@ public final class CacheManager {
private void checkLimit(CachePool pool, String path, private void checkLimit(CachePool pool, String path,
short replication) throws InvalidRequestException { short replication) throws InvalidRequestException {
CacheDirectiveStats stats = computeNeeded(path, replication); CacheDirectiveStats stats = computeNeeded(path, replication);
if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
return;
}
if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
.getLimit()) { .getLimit()) {
throw new InvalidRequestException("Caching path " + path + " of size " throw new InvalidRequestException("Caching path " + path + " of size "
@ -461,17 +487,13 @@ public final class CacheManager {
} }
/** /**
* To be called only from the edit log loading code * Adds a directive, skipping most error checking. This should only be called
* internally in special scenarios like edit log replay.
*/ */
CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive) CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
throws InvalidRequestException { throws InvalidRequestException {
long id = directive.getId(); long id = directive.getId();
CacheDirective entry = CacheDirective entry = new CacheDirective(directive);
new CacheDirective(
directive.getId(),
directive.getPath().toUri().getPath(),
directive.getReplication(),
directive.getExpiration().getAbsoluteMillis());
CachePool pool = cachePools.get(directive.getPool()); CachePool pool = cachePools.get(directive.getPool());
addInternal(entry, pool); addInternal(entry, pool);
if (nextDirectiveId <= id) { if (nextDirectiveId <= id) {
@ -490,8 +512,7 @@ public final class CacheManager {
checkWritePermission(pc, pool); checkWritePermission(pc, pool);
String path = validatePath(info); String path = validatePath(info);
short replication = validateReplication(info, (short)1); short replication = validateReplication(info, (short)1);
long expiryTime = validateExpiryTime(info, long expiryTime = validateExpiryTime(info, pool.getMaxRelativeExpiryMs());
CacheDirectiveInfo.Expiration.EXPIRY_NEVER);
// Do quota validation if required // Do quota validation if required
if (!flags.contains(CacheFlag.FORCE)) { if (!flags.contains(CacheFlag.FORCE)) {
// Can't kick and wait if caching is disabled // Can't kick and wait if caching is disabled
@ -513,6 +534,56 @@ public final class CacheManager {
return directive.toInfo(); return directive.toInfo();
} }
/**
 * Merges the fields that are set on {@code info} over the values of an
 * existing CacheDirective, producing a fully-populated CacheDirectiveInfo.
 *
 * @param info with some or all fields set.
 * @param defaults directive providing default values for unset fields in
 *          info.
 * @return new CacheDirectiveInfo of the info applied to the defaults.
 */
private static CacheDirectiveInfo createFromInfoAndDefaults(
    CacheDirectiveInfo info, CacheDirective defaults) {
  // Start from the existing directive's values, then overwrite each field
  // the caller explicitly supplied.
  CacheDirectiveInfo.Builder bld =
      new CacheDirectiveInfo.Builder(defaults.toInfo());
  if (info.getPath() != null) {
    bld.setPath(info.getPath());
  }
  if (info.getReplication() != null) {
    bld.setReplication(info.getReplication());
  }
  if (info.getPool() != null) {
    bld.setPool(info.getPool());
  }
  if (info.getExpiration() != null) {
    bld.setExpiration(info.getExpiration());
  }
  return bld.build();
}
/**
 * Modifies a directive, skipping most error checking. This is for careful
 * internal use only. modifyDirective can be non-deterministic since its error
 * checking depends on current system time, which poses a problem for edit log
 * replay.
 *
 * @param info fields to change; must carry the ID of an existing directive.
 * @throws InvalidRequestException if the ID is missing (and presumably if it
 *         is unknown — see getById; TODO confirm).
 */
void modifyDirectiveFromEditLog(CacheDirectiveInfo info)
throws InvalidRequestException {
// Check for invalid IDs.
Long id = info.getId();
if (id == null) {
throw new InvalidRequestException("Must supply an ID.");
}
CacheDirective prevEntry = getById(id);
// Merge the requested changes onto the existing directive's values.
CacheDirectiveInfo newInfo = createFromInfoAndDefaults(info, prevEntry);
// Replace the old entry: remove first, then re-add the merged directive
// (possibly into a different pool if the pool field changed).
removeInternal(prevEntry);
addInternal(new CacheDirective(newInfo), getCachePool(newInfo.getPool()));
}
public void modifyDirective(CacheDirectiveInfo info, public void modifyDirective(CacheDirectiveInfo info,
FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException { FSPermissionChecker pc, EnumSet<CacheFlag> flags) throws IOException {
assert namesystem.hasWriteLock(); assert namesystem.hasWriteLock();
@ -527,33 +598,38 @@ public final class CacheManager {
} }
CacheDirective prevEntry = getById(id); CacheDirective prevEntry = getById(id);
checkWritePermission(pc, prevEntry.getPool()); checkWritePermission(pc, prevEntry.getPool());
String path = prevEntry.getPath();
if (info.getPath() != null) {
path = validatePath(info);
}
short replication = prevEntry.getReplication(); // Fill in defaults
replication = validateReplication(info, replication); CacheDirectiveInfo infoWithDefaults =
createFromInfoAndDefaults(info, prevEntry);
CacheDirectiveInfo.Builder builder =
new CacheDirectiveInfo.Builder(infoWithDefaults);
long expiryTime = prevEntry.getExpiryTime(); // Do validation
expiryTime = validateExpiryTime(info, expiryTime); validatePath(infoWithDefaults);
validateReplication(infoWithDefaults, (short)-1);
CachePool pool = prevEntry.getPool(); // Need to test the pool being set here to avoid rejecting a modify for a
if (info.getPool() != null) { // directive that's already been forced into a pool
pool = getCachePool(validatePoolName(info)); CachePool srcPool = prevEntry.getPool();
checkWritePermission(pc, pool); CachePool destPool = getCachePool(validatePoolName(infoWithDefaults));
if (!srcPool.getPoolName().equals(destPool.getPoolName())) {
checkWritePermission(pc, destPool);
if (!flags.contains(CacheFlag.FORCE)) { if (!flags.contains(CacheFlag.FORCE)) {
// Can't kick and wait if caching is disabled checkLimit(destPool, infoWithDefaults.getPath().toUri().getPath(),
infoWithDefaults.getReplication());
}
}
// Verify the expiration against the destination pool
validateExpiryTime(infoWithDefaults, destPool.getMaxRelativeExpiryMs());
// Indicate changes to the CRM
if (monitor != null) { if (monitor != null) {
monitor.waitForRescan(); monitor.setNeedsRescan();
}
checkLimit(pool, path, replication);
}
} }
// Validation passed
removeInternal(prevEntry); removeInternal(prevEntry);
CacheDirective newEntry = addInternal(new CacheDirective(builder.build()), destPool);
new CacheDirective(id, path, replication, expiryTime);
addInternal(newEntry, pool);
} catch (IOException e) { } catch (IOException e) {
LOG.warn("modifyDirective of " + idString + " failed: ", e); LOG.warn("modifyDirective of " + idString + " failed: ", e);
throw e; throw e;
@ -562,7 +638,7 @@ public final class CacheManager {
info+ "."); info+ ".");
} }
public void removeInternal(CacheDirective directive) private void removeInternal(CacheDirective directive)
throws InvalidRequestException { throws InvalidRequestException {
assert namesystem.hasWriteLock(); assert namesystem.hasWriteLock();
// Remove the corresponding entry in directivesByPath. // Remove the corresponding entry in directivesByPath.
@ -734,6 +810,13 @@ public final class CacheManager {
monitor.setNeedsRescan(); monitor.setNeedsRescan();
} }
} }
if (info.getMaxRelativeExpiryMs() != null) {
final Long maxRelativeExpiry = info.getMaxRelativeExpiryMs();
pool.setMaxRelativeExpiryMs(maxRelativeExpiry);
bld.append(prefix).append("set maxRelativeExpiry to "
+ maxRelativeExpiry);
prefix = "; ";
}
if (prefix.isEmpty()) { if (prefix.isEmpty()) {
bld.append("no changes."); bld.append("no changes.");
} }

View File

@ -49,8 +49,6 @@ import com.google.common.base.Preconditions;
public final class CachePool { public final class CachePool {
public static final Log LOG = LogFactory.getLog(CachePool.class); public static final Log LOG = LogFactory.getLog(CachePool.class);
public static final long DEFAULT_LIMIT = Long.MAX_VALUE;
@Nonnull @Nonnull
private final String poolName; private final String poolName;
@ -76,6 +74,12 @@ public final class CachePool {
*/ */
private long limit; private long limit;
/**
* Maximum duration that a CacheDirective in this pool remains valid,
* in milliseconds.
*/
private long maxRelativeExpiryMs;
private long bytesNeeded; private long bytesNeeded;
private long bytesCached; private long bytesCached;
private long filesNeeded; private long filesNeeded;
@ -122,9 +126,12 @@ public final class CachePool {
FsPermission mode = (info.getMode() == null) ? FsPermission mode = (info.getMode() == null) ?
FsPermission.getCachePoolDefault() : info.getMode(); FsPermission.getCachePoolDefault() : info.getMode();
long limit = info.getLimit() == null ? long limit = info.getLimit() == null ?
DEFAULT_LIMIT : info.getLimit(); CachePoolInfo.DEFAULT_LIMIT : info.getLimit();
long maxRelativeExpiry = info.getMaxRelativeExpiryMs() == null ?
CachePoolInfo.DEFAULT_MAX_RELATIVE_EXPIRY :
info.getMaxRelativeExpiryMs();
return new CachePool(info.getPoolName(), return new CachePool(info.getPoolName(),
ownerName, groupName, mode, limit); ownerName, groupName, mode, limit, maxRelativeExpiry);
} }
/** /**
@ -134,11 +141,11 @@ public final class CachePool {
static CachePool createFromInfo(CachePoolInfo info) { static CachePool createFromInfo(CachePoolInfo info) {
return new CachePool(info.getPoolName(), return new CachePool(info.getPoolName(),
info.getOwnerName(), info.getGroupName(), info.getOwnerName(), info.getGroupName(),
info.getMode(), info.getLimit()); info.getMode(), info.getLimit(), info.getMaxRelativeExpiryMs());
} }
CachePool(String poolName, String ownerName, String groupName, CachePool(String poolName, String ownerName, String groupName,
FsPermission mode, long limit) { FsPermission mode, long limit, long maxRelativeExpiry) {
Preconditions.checkNotNull(poolName); Preconditions.checkNotNull(poolName);
Preconditions.checkNotNull(ownerName); Preconditions.checkNotNull(ownerName);
Preconditions.checkNotNull(groupName); Preconditions.checkNotNull(groupName);
@ -148,6 +155,7 @@ public final class CachePool {
this.groupName = groupName; this.groupName = groupName;
this.mode = new FsPermission(mode); this.mode = new FsPermission(mode);
this.limit = limit; this.limit = limit;
this.maxRelativeExpiryMs = maxRelativeExpiry;
} }
public String getPoolName() { public String getPoolName() {
@ -190,6 +198,15 @@ public final class CachePool {
return this; return this;
} }
/**
 * @return The maximum duration, in milliseconds, that a CacheDirective in
 *         this pool remains valid (relative expiry).
 */
public long getMaxRelativeExpiryMs() {
return maxRelativeExpiryMs;
}
/**
 * Set the maximum relative expiry, in milliseconds, for directives in this
 * pool.
 *
 * @param expiry the new maximum relative expiry, in milliseconds
 * @return this pool, for call chaining
 */
public CachePool setMaxRelativeExpiryMs(long expiry) {
this.maxRelativeExpiryMs = expiry;
return this;
}
/** /**
* Get either full or partial information about this CachePool. * Get either full or partial information about this CachePool.
* *
@ -207,7 +224,8 @@ public final class CachePool {
return info.setOwnerName(ownerName). return info.setOwnerName(ownerName).
setGroupName(groupName). setGroupName(groupName).
setMode(new FsPermission(mode)). setMode(new FsPermission(mode)).
setLimit(limit); setLimit(limit).
setMaxRelativeExpiryMs(maxRelativeExpiryMs);
} }
/** /**
@ -300,6 +318,7 @@ public final class CachePool {
append(", groupName:").append(groupName). append(", groupName:").append(groupName).
append(", mode:").append(mode). append(", mode:").append(mode).
append(", limit:").append(limit). append(", limit:").append(limit).
append(", maxRelativeExpiryMs:").append(maxRelativeExpiryMs).
append(" }").toString(); append(" }").toString();
} }

View File

@ -651,8 +651,8 @@ public class FSEditLogLoader {
case OP_MODIFY_CACHE_DIRECTIVE: { case OP_MODIFY_CACHE_DIRECTIVE: {
ModifyCacheDirectiveInfoOp modifyOp = ModifyCacheDirectiveInfoOp modifyOp =
(ModifyCacheDirectiveInfoOp) op; (ModifyCacheDirectiveInfoOp) op;
fsNamesys.getCacheManager().modifyDirective( fsNamesys.getCacheManager().modifyDirectiveFromEditLog(
modifyOp.directive, null, EnumSet.of(CacheFlag.FORCE)); modifyOp.directive);
if (toAddRetryCache) { if (toAddRetryCache) {
fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId); fsNamesys.addCacheEntry(op.rpcClientId, op.rpcCallId);
} }

View File

@ -587,18 +587,22 @@ public class FSImageSerialization {
final String groupName = info.getGroupName(); final String groupName = info.getGroupName();
final Long limit = info.getLimit(); final Long limit = info.getLimit();
final FsPermission mode = info.getMode(); final FsPermission mode = info.getMode();
final Long maxRelativeExpiry = info.getMaxRelativeExpiryMs();
boolean hasOwner, hasGroup, hasMode, hasLimit; boolean hasOwner, hasGroup, hasMode, hasLimit, hasMaxRelativeExpiry;
hasOwner = ownerName != null; hasOwner = ownerName != null;
hasGroup = groupName != null; hasGroup = groupName != null;
hasMode = mode != null; hasMode = mode != null;
hasLimit = limit != null; hasLimit = limit != null;
hasMaxRelativeExpiry = maxRelativeExpiry != null;
int flags = int flags =
(hasOwner ? 0x1 : 0) | (hasOwner ? 0x1 : 0) |
(hasGroup ? 0x2 : 0) | (hasGroup ? 0x2 : 0) |
(hasMode ? 0x4 : 0) | (hasMode ? 0x4 : 0) |
(hasLimit ? 0x8 : 0); (hasLimit ? 0x8 : 0) |
(hasMaxRelativeExpiry ? 0x10 : 0);
writeInt(flags, out); writeInt(flags, out);
if (hasOwner) { if (hasOwner) {
@ -613,6 +617,9 @@ public class FSImageSerialization {
if (hasLimit) { if (hasLimit) {
writeLong(limit, out); writeLong(limit, out);
} }
if (hasMaxRelativeExpiry) {
writeLong(maxRelativeExpiry, out);
}
} }
public static CachePoolInfo readCachePoolInfo(DataInput in) public static CachePoolInfo readCachePoolInfo(DataInput in)
@ -632,7 +639,10 @@ public class FSImageSerialization {
if ((flags & 0x8) != 0) { if ((flags & 0x8) != 0) {
info.setLimit(readLong(in)); info.setLimit(readLong(in));
} }
if ((flags & ~0xF) != 0) { if ((flags & 0x10) != 0) {
info.setMaxRelativeExpiryMs(readLong(in));
}
if ((flags & ~0x1F) != 0) {
throw new IOException("Unknown flag in CachePoolInfo: " + flags); throw new IOException("Unknown flag in CachePoolInfo: " + flags);
} }
return info; return info;
@ -646,6 +656,7 @@ public class FSImageSerialization {
final String groupName = info.getGroupName(); final String groupName = info.getGroupName();
final Long limit = info.getLimit(); final Long limit = info.getLimit();
final FsPermission mode = info.getMode(); final FsPermission mode = info.getMode();
final Long maxRelativeExpiry = info.getMaxRelativeExpiryMs();
if (ownerName != null) { if (ownerName != null) {
XMLUtils.addSaxString(contentHandler, "OWNERNAME", ownerName); XMLUtils.addSaxString(contentHandler, "OWNERNAME", ownerName);
@ -660,6 +671,10 @@ public class FSImageSerialization {
XMLUtils.addSaxString(contentHandler, "LIMIT", XMLUtils.addSaxString(contentHandler, "LIMIT",
Long.toString(limit)); Long.toString(limit));
} }
if (maxRelativeExpiry != null) {
XMLUtils.addSaxString(contentHandler, "MAXRELATIVEEXPIRY",
Long.toString(maxRelativeExpiry));
}
} }
public static CachePoolInfo readCachePoolInfo(Stanza st) public static CachePoolInfo readCachePoolInfo(Stanza st)
@ -678,6 +693,10 @@ public class FSImageSerialization {
if (st.hasChildren("LIMIT")) { if (st.hasChildren("LIMIT")) {
info.setLimit(Long.parseLong(st.getValue("LIMIT"))); info.setLimit(Long.parseLong(st.getValue("LIMIT")));
} }
if (st.hasChildren("MAXRELATIVEEXPIRY")) {
info.setMaxRelativeExpiryMs(
Long.parseLong(st.getValue("MAXRELATIVEEXPIRY")));
}
return info; return info;
} }

View File

@ -35,14 +35,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry; import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo; import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo.Expiration;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats; import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
import org.apache.hadoop.hdfs.protocol.CachePoolEntry; import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolStats; import org.apache.hadoop.hdfs.protocol.CachePoolStats;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.tools.TableListing.Justification; import org.apache.hadoop.hdfs.tools.TableListing.Justification;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.Tool;
@ -120,6 +118,23 @@ public class CacheAdmin extends Configured implements Tool {
return listing; return listing;
} }
/**
 * Parses a time-to-live value from a string.
 *
 * @param maxTtlString duration string to parse (e.g. "30m", "4h", "2d"),
 *                     the special value "never" (case-insensitive), or null
 * @return the TTL in milliseconds, or null if the input string was null
 * @throws IOException if it could not be parsed
 */
private static Long parseTtlString(String maxTtlString) throws IOException {
  // No argument supplied: leave the TTL unset.
  if (maxTtlString == null) {
    return null;
  }
  // "never" maps to the sentinel meaning no relative-expiry limit.
  if (maxTtlString.equalsIgnoreCase("never")) {
    return CachePoolInfo.RELATIVE_EXPIRY_NEVER;
  }
  return DFSUtil.parseRelativeTime(maxTtlString);
}
interface Command { interface Command {
String getName(); String getName();
String getShortUsage(); String getShortUsage();
@ -154,7 +169,7 @@ public class CacheAdmin extends Configured implements Tool {
listing.addRow("<replication>", "The cache replication factor to use. " + listing.addRow("<replication>", "The cache replication factor to use. " +
"Defaults to 1."); "Defaults to 1.");
listing.addRow("<time-to-live>", "How long the directive is " + listing.addRow("<time-to-live>", "How long the directive is " +
"valid. Can be specified in minutes, hours, and days via e.g. " + "valid. Can be specified in minutes, hours, and days, e.g. " +
"30m, 4h, 2d. Valid units are [smhd]." + "30m, 4h, 2d. Valid units are [smhd]." +
" If unspecified, the directive never expires."); " If unspecified, the directive never expires.");
return getShortUsage() + "\n" + return getShortUsage() + "\n" +
@ -309,7 +324,7 @@ public class CacheAdmin extends Configured implements Tool {
"added. You must have write permission on the cache pool " "added. You must have write permission on the cache pool "
+ "in order to move a directive into it. (optional)"); + "in order to move a directive into it. (optional)");
listing.addRow("<time-to-live>", "How long the directive is " + listing.addRow("<time-to-live>", "How long the directive is " +
"valid. Can be specified in minutes, hours, and days via e.g. " + "valid. Can be specified in minutes, hours, and days, e.g. " +
"30m, 4h, 2d. Valid units are [smhd]." + "30m, 4h, 2d. Valid units are [smhd]." +
" If unspecified, the directive never expires."); " If unspecified, the directive never expires.");
return getShortUsage() + "\n" + return getShortUsage() + "\n" +
@ -419,12 +434,13 @@ public class CacheAdmin extends Configured implements Tool {
System.err.println("Usage is " + getShortUsage()); System.err.println("Usage is " + getShortUsage());
return 1; return 1;
} }
int exitCode = 0;
try {
DistributedFileSystem dfs = getDFS(conf); DistributedFileSystem dfs = getDFS(conf);
RemoteIterator<CacheDirectiveEntry> iter = RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives( dfs.listCacheDirectives(
new CacheDirectiveInfo.Builder(). new CacheDirectiveInfo.Builder().
setPath(new Path(path)).build()); setPath(new Path(path)).build());
int exitCode = 0;
while (iter.hasNext()) { while (iter.hasNext()) {
CacheDirectiveEntry entry = iter.next(); CacheDirectiveEntry entry = iter.next();
try { try {
@ -436,6 +452,10 @@ public class CacheAdmin extends Configured implements Tool {
exitCode = 2; exitCode = 2;
} }
} }
} catch (IOException e) {
System.err.println(prettifyException(e));
exitCode = 2;
}
if (exitCode == 0) { if (exitCode == 0) {
System.out.println("Removed every cache directive with path " + System.out.println("Removed every cache directive with path " +
path); path);
@ -500,7 +520,7 @@ public class CacheAdmin extends Configured implements Tool {
addField("FILES_CACHED", Justification.RIGHT); addField("FILES_CACHED", Justification.RIGHT);
} }
TableListing tableListing = tableBuilder.build(); TableListing tableListing = tableBuilder.build();
try {
DistributedFileSystem dfs = getDFS(conf); DistributedFileSystem dfs = getDFS(conf);
RemoteIterator<CacheDirectiveEntry> iter = RemoteIterator<CacheDirectiveEntry> iter =
dfs.listCacheDirectives(builder.build()); dfs.listCacheDirectives(builder.build());
@ -514,8 +534,9 @@ public class CacheAdmin extends Configured implements Tool {
row.add(directive.getPool()); row.add(directive.getPool());
row.add("" + directive.getReplication()); row.add("" + directive.getReplication());
String expiry; String expiry;
if (directive.getExpiration().getMillis() == // This is effectively never, round for nice printing
CacheDirectiveInfo.Expiration.EXPIRY_NEVER) { if (directive.getExpiration().getMillis() >
Expiration.MAX_RELATIVE_EXPIRY_MS / 2) {
expiry = "never"; expiry = "never";
} else { } else {
expiry = directive.getExpiration().toString(); expiry = directive.getExpiration().toString();
@ -536,6 +557,10 @@ public class CacheAdmin extends Configured implements Tool {
if (numEntries > 0) { if (numEntries > 0) {
System.out.print(tableListing); System.out.print(tableListing);
} }
} catch (IOException e) {
System.err.println(prettifyException(e));
return 2;
}
return 0; return 0;
} }
} }
@ -552,7 +577,8 @@ public class CacheAdmin extends Configured implements Tool {
@Override @Override
public String getShortUsage() { public String getShortUsage() {
return "[" + NAME + " <name> [-owner <owner>] " + return "[" + NAME + " <name> [-owner <owner>] " +
"[-group <group>] [-mode <mode>] [-limit <limit>]]\n"; "[-group <group>] [-mode <mode>] [-limit <limit>] " +
"[-maxttl <maxTtl>]\n";
} }
@Override @Override
@ -571,7 +597,11 @@ public class CacheAdmin extends Configured implements Tool {
listing.addRow("<limit>", "The maximum number of bytes that can be " + listing.addRow("<limit>", "The maximum number of bytes that can be " +
"cached by directives in this pool, in aggregate. By default, " + "cached by directives in this pool, in aggregate. By default, " +
"no limit is set."); "no limit is set.");
listing.addRow("<maxTtl>", "The maximum allowed time-to-live for " +
"directives being added to the pool. This can be specified in " +
"seconds, minutes, hours, and days, e.g. 120s, 30m, 4h, 2d. " +
"Valid units are [smhd]. By default, no maximum is set. " +
"This can also be manually specified by \"never\".");
return getShortUsage() + "\n" + return getShortUsage() + "\n" +
"Add a new cache pool.\n\n" + "Add a new cache pool.\n\n" +
listing.toString(); listing.toString();
@ -605,6 +635,18 @@ public class CacheAdmin extends Configured implements Tool {
long limit = Long.parseLong(limitString); long limit = Long.parseLong(limitString);
info.setLimit(limit); info.setLimit(limit);
} }
String maxTtlString = StringUtils.popOptionWithArgument("-maxTtl", args);
try {
Long maxTtl = parseTtlString(maxTtlString);
if (maxTtl != null) {
info.setMaxRelativeExpiryMs(maxTtl);
}
} catch (IOException e) {
System.err.println(
"Error while parsing maxTtl value: " + e.getMessage());
return 1;
}
if (!args.isEmpty()) { if (!args.isEmpty()) {
System.err.print("Can't understand arguments: " + System.err.print("Can't understand arguments: " +
Joiner.on(" ").join(args) + "\n"); Joiner.on(" ").join(args) + "\n");
@ -615,7 +657,8 @@ public class CacheAdmin extends Configured implements Tool {
try { try {
dfs.addCachePool(info); dfs.addCachePool(info);
} catch (IOException e) { } catch (IOException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage()); System.err.println(prettifyException(e));
return 2;
} }
System.out.println("Successfully added cache pool " + name + "."); System.out.println("Successfully added cache pool " + name + ".");
return 0; return 0;
@ -632,7 +675,8 @@ public class CacheAdmin extends Configured implements Tool {
@Override @Override
public String getShortUsage() { public String getShortUsage() {
return "[" + getName() + " <name> [-owner <owner>] " + return "[" + getName() + " <name> [-owner <owner>] " +
"[-group <group>] [-mode <mode>] [-limit <limit>]]\n"; "[-group <group>] [-mode <mode>] [-limit <limit>] " +
"[-maxTtl <maxTtl>]]\n";
} }
@Override @Override
@ -645,6 +689,8 @@ public class CacheAdmin extends Configured implements Tool {
listing.addRow("<mode>", "Unix-style permissions of the pool in octal."); listing.addRow("<mode>", "Unix-style permissions of the pool in octal.");
listing.addRow("<limit>", "Maximum number of bytes that can be cached " + listing.addRow("<limit>", "Maximum number of bytes that can be cached " +
"by this pool."); "by this pool.");
listing.addRow("<maxTtl>", "The maximum allowed time-to-live for " +
"directives being added to the pool.");
return getShortUsage() + "\n" + return getShortUsage() + "\n" +
WordUtils.wrap("Modifies the metadata of an existing cache pool. " + WordUtils.wrap("Modifies the metadata of an existing cache pool. " +
@ -663,6 +709,15 @@ public class CacheAdmin extends Configured implements Tool {
String limitString = StringUtils.popOptionWithArgument("-limit", args); String limitString = StringUtils.popOptionWithArgument("-limit", args);
Long limit = (limitString == null) ? Long limit = (limitString == null) ?
null : Long.parseLong(limitString); null : Long.parseLong(limitString);
String maxTtlString = StringUtils.popOptionWithArgument("-maxTtl", args);
Long maxTtl = null;
try {
maxTtl = parseTtlString(maxTtlString);
} catch (IOException e) {
System.err.println(
"Error while parsing maxTtl value: " + e.getMessage());
return 1;
}
String name = StringUtils.popFirstNonOption(args); String name = StringUtils.popFirstNonOption(args);
if (name == null) { if (name == null) {
System.err.println("You must specify a name when creating a " + System.err.println("You must specify a name when creating a " +
@ -693,6 +748,10 @@ public class CacheAdmin extends Configured implements Tool {
info.setLimit(limit); info.setLimit(limit);
changed = true; changed = true;
} }
if (maxTtl != null) {
info.setMaxRelativeExpiryMs(maxTtl);
changed = true;
}
if (!changed) { if (!changed) {
System.err.println("You must specify at least one attribute to " + System.err.println("You must specify at least one attribute to " +
"change in the cache pool."); "change in the cache pool.");
@ -702,7 +761,8 @@ public class CacheAdmin extends Configured implements Tool {
try { try {
dfs.modifyCachePool(info); dfs.modifyCachePool(info);
} catch (IOException e) { } catch (IOException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage()); System.err.println(prettifyException(e));
return 2;
} }
System.out.print("Successfully modified cache pool " + name); System.out.print("Successfully modified cache pool " + name);
String prefix = " to have "; String prefix = " to have ";
@ -722,6 +782,9 @@ public class CacheAdmin extends Configured implements Tool {
System.out.print(prefix + "limit " + limit); System.out.print(prefix + "limit " + limit);
prefix = " and "; prefix = " and ";
} }
if (maxTtl != null) {
System.out.print(prefix + "max time-to-live " + maxTtlString);
}
System.out.print("\n"); System.out.print("\n");
return 0; return 0;
} }
@ -765,7 +828,8 @@ public class CacheAdmin extends Configured implements Tool {
try { try {
dfs.removeCachePool(name); dfs.removeCachePool(name);
} catch (IOException e) { } catch (IOException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage()); System.err.println(prettifyException(e));
return 2;
} }
System.out.println("Successfully removed cache pool " + name + "."); System.out.println("Successfully removed cache pool " + name + ".");
return 0; return 0;
@ -813,7 +877,8 @@ public class CacheAdmin extends Configured implements Tool {
addField("OWNER", Justification.LEFT). addField("OWNER", Justification.LEFT).
addField("GROUP", Justification.LEFT). addField("GROUP", Justification.LEFT).
addField("MODE", Justification.LEFT). addField("MODE", Justification.LEFT).
addField("LIMIT", Justification.RIGHT); addField("LIMIT", Justification.RIGHT).
addField("MAXTTL", Justification.RIGHT);
if (printStats) { if (printStats) {
builder. builder.
addField("BYTES_NEEDED", Justification.RIGHT). addField("BYTES_NEEDED", Justification.RIGHT).
@ -837,12 +902,23 @@ public class CacheAdmin extends Configured implements Tool {
row.add(info.getMode() != null ? info.getMode().toString() : null); row.add(info.getMode() != null ? info.getMode().toString() : null);
Long limit = info.getLimit(); Long limit = info.getLimit();
String limitString; String limitString;
if (limit != null && limit.equals(CachePool.DEFAULT_LIMIT)) { if (limit != null && limit.equals(CachePoolInfo.LIMIT_UNLIMITED)) {
limitString = "unlimited"; limitString = "unlimited";
} else { } else {
limitString = "" + limit; limitString = "" + limit;
} }
row.add(limitString); row.add(limitString);
Long maxTtl = info.getMaxRelativeExpiryMs();
String maxTtlString = null;
if (maxTtl != null) {
if (maxTtl.longValue() == CachePoolInfo.RELATIVE_EXPIRY_NEVER) {
maxTtlString = "never";
} else {
maxTtlString = DFSUtil.durationToString(maxTtl);
}
}
row.add(maxTtlString);
if (printStats) { if (printStats) {
CachePoolStats stats = entry.getStats(); CachePoolStats stats = entry.getStats();
row.add(Long.toString(stats.getBytesNeeded())); row.add(Long.toString(stats.getBytesNeeded()));
@ -859,7 +935,8 @@ public class CacheAdmin extends Configured implements Tool {
} }
} }
} catch (IOException e) { } catch (IOException e) {
throw new RemoteException(e.getClass().getName(), e.getMessage()); System.err.println(prettifyException(e));
return 2;
} }
System.out.print(String.format("Found %d result%s.\n", numResults, System.out.print(String.format("Found %d result%s.\n", numResults,
(numResults == 1 ? "" : "s"))); (numResults == 1 ? "" : "s")));

View File

@ -434,6 +434,7 @@ message CachePoolInfoProto {
optional string groupName = 3; optional string groupName = 3;
optional int32 mode = 4; optional int32 mode = 4;
optional int64 limit = 5; optional int64 limit = 5;
optional int64 maxRelativeExpiry = 6;
} }
message CachePoolStatsProto { message CachePoolStatsProto {

View File

@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider; import org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider;
import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.Shell;
import org.junit.Assume; import org.junit.Assume;
import org.junit.Before; import org.junit.Before;
@ -730,16 +729,15 @@ public class TestDFSUtil {
@Test(timeout=1000) @Test(timeout=1000)
public void testDurationToString() throws Exception { public void testDurationToString() throws Exception {
assertEquals("000:00:00:00", DFSUtil.durationToString(0)); assertEquals("000:00:00:00.000", DFSUtil.durationToString(0));
try { assertEquals("001:01:01:01.000",
DFSUtil.durationToString(-199);
} catch (IllegalArgumentException e) {
GenericTestUtils.assertExceptionContains("Invalid negative duration", e);
}
assertEquals("001:01:01:01",
DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000)); DFSUtil.durationToString(((24*60*60)+(60*60)+(60)+1)*1000));
assertEquals("000:23:59:59", assertEquals("000:23:59:59.999",
DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000)); DFSUtil.durationToString(((23*60*60)+(59*60)+(59))*1000+999));
assertEquals("-001:01:01:01.000",
DFSUtil.durationToString(-((24*60*60)+(60*60)+(60)+1)*1000));
assertEquals("-000:23:59:59.574",
DFSUtil.durationToString(-(((23*60*60)+(59*60)+(59))*1000+574)));
} }
@Test(timeout=5000) @Test(timeout=5000)
@ -763,7 +761,7 @@ public class TestDFSUtil {
assertEquals(61*60*1000, DFSUtil.parseRelativeTime("61m")); assertEquals(61*60*1000, DFSUtil.parseRelativeTime("61m"));
assertEquals(0, DFSUtil.parseRelativeTime("0s")); assertEquals(0, DFSUtil.parseRelativeTime("0s"));
assertEquals(25*60*60*1000, DFSUtil.parseRelativeTime("25h")); assertEquals(25*60*60*1000, DFSUtil.parseRelativeTime("25h"));
assertEquals(4*24*60*60*1000, DFSUtil.parseRelativeTime("4d")); assertEquals(4*24*60*60*1000l, DFSUtil.parseRelativeTime("4d"));
assertEquals(999*24*60*60*1000, DFSUtil.parseRelativeTime("999d")); assertEquals(999*24*60*60*1000l, DFSUtil.parseRelativeTime("999d"));
} }
} }

View File

@ -23,6 +23,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_MAX_LOCKED_MEMOR
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_CACHING_ENABLED_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_PATH_BASED_CACHE_REFRESH_INTERVAL_MS;
import static org.apache.hadoop.hdfs.protocol.CachePoolInfo.RELATIVE_EXPIRY_NEVER;
import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNotNull;
@ -137,6 +139,8 @@ public class TestCacheDirectives {
NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator()); NativeIO.POSIX.setCacheManipulator(new NoMlockCacheManipulator());
LogManager.getLogger(CacheReplicationMonitor.class.getName()).setLevel( LogManager.getLogger(CacheReplicationMonitor.class.getName()).setLevel(
Level.TRACE); Level.TRACE);
LogManager.getLogger(CacheManager.class.getName()).setLevel(
Level.TRACE);
} }
@After @After
@ -1189,4 +1193,185 @@ public class TestCacheDirectives {
new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName()) new CacheDirectiveInfo.Builder().setPool(inadequate.getPoolName())
.setPath(path1).build(), EnumSet.of(CacheFlag.FORCE)); .setPath(path1).build(), EnumSet.of(CacheFlag.FORCE));
} }
/**
 * End-to-end test of the per-pool maximum relative expiry (HDFS-5636):
 * validates that out-of-range pool maxima are rejected on add/modify, that
 * directives default to the pool's max TTL, that the max is enforced for
 * both relative and absolute directive expirations (on add, modify, and
 * when moving a directive between pools), and that both pool max and
 * directive expiry can be set back to "never".
 */
@Test(timeout=30000)
public void testMaxRelativeExpiry() throws Exception {
  // Test that negative and really big max expirations can't be set during add
  try {
    dfs.addCachePool(new CachePoolInfo("failpool").setMaxRelativeExpiryMs(-1l));
    fail("Added a pool with a negative max expiry.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("negative", e);
  }
  try {
    dfs.addCachePool(new CachePoolInfo("failpool")
        .setMaxRelativeExpiryMs(Long.MAX_VALUE - 1));
    fail("Added a pool with too big of a max expiry.");
  } catch (InvalidRequestException e) {
    GenericTestUtils.assertExceptionContains("too big", e);
  }
  // Test that setting a max relative expiry on a pool works
  CachePoolInfo coolPool = new CachePoolInfo("coolPool");
  final long poolExpiration = 1000 * 60 * 10l;
  dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(poolExpiration));
  RemoteIterator<CachePoolEntry> poolIt = dfs.listCachePools();
  CachePoolInfo listPool = poolIt.next().getInfo();
  assertFalse("Should only be one pool", poolIt.hasNext());
  assertEquals("Expected max relative expiry to match set value",
      poolExpiration, listPool.getMaxRelativeExpiryMs().longValue());
  // Test that negative and really big max expirations can't be modified
  try {
    dfs.addCachePool(coolPool.setMaxRelativeExpiryMs(-1l));
    fail("Added a pool with a negative max expiry.");
  } catch (InvalidRequestException e) {
    assertExceptionContains("negative", e);
  }
  try {
    dfs.modifyCachePool(coolPool
        .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER+1));
    fail("Added a pool with too big of a max expiry.");
  } catch (InvalidRequestException e) {
    assertExceptionContains("too big", e);
  }
  // Test that adding a directive without an expiration uses the pool's max
  CacheDirectiveInfo defaultExpiry = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/blah"))
      .setPool(coolPool.getPoolName())
      .build();
  dfs.addCacheDirective(defaultExpiry);
  RemoteIterator<CacheDirectiveEntry> dirIt =
      dfs.listCacheDirectives(defaultExpiry);
  CacheDirectiveInfo listInfo = dirIt.next().getInfo();
  assertFalse("Should only have one entry in listing", dirIt.hasNext());
  // Measured relative to "now", so allow generous slop below (10s)
  long listExpiration = listInfo.getExpiration().getAbsoluteMillis()
      - new Date().getTime();
  assertTrue("Directive expiry should be approximately the pool's max expiry",
      Math.abs(listExpiration - poolExpiration) < 10*1000);
  // Test that the max is enforced on add for relative and absolute
  CacheDirectiveInfo.Builder builder = new CacheDirectiveInfo.Builder()
      .setPath(new Path("/lolcat"))
      .setPool(coolPool.getPoolName());
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newRelative(poolExpiration+1))
        .build());
    fail("Added a directive that exceeds pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newAbsolute(
            new Date().getTime() + poolExpiration + (10*1000)))
        .build());
    fail("Added a directive that exceeds pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  // Test that max is enforced on modify for relative and absolute Expirations
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newRelative(poolExpiration+1))
        .build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newAbsolute(
            new Date().getTime() + poolExpiration + (10*1000)))
        .build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  // Test some giant limit values with add
  // NOTE: relative overflow is rejected client-side (IllegalArgumentException),
  // absolute overflow server-side (InvalidRequestException)
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newRelative(
            Long.MAX_VALUE))
        .build());
    fail("Added a directive with a gigantic max value");
  } catch (IllegalArgumentException e) {
    assertExceptionContains("is too far in the future", e);
  }
  try {
    dfs.addCacheDirective(builder
        .setExpiration(Expiration.newAbsolute(
            Long.MAX_VALUE))
        .build());
    fail("Added a directive with a gigantic max value");
  } catch (InvalidRequestException e) {
    assertExceptionContains("is too far in the future", e);
  }
  // Test some giant limit values with modify
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.NEVER)
        .build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setExpiration(Expiration.newAbsolute(
            Long.MAX_VALUE))
        .build());
    fail("Modified a directive to exceed pool's max relative expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("is too far in the future", e);
  }
  // Test that the max is enforced on modify correctly when changing pools
  CachePoolInfo destPool = new CachePoolInfo("destPool");
  dfs.addCachePool(destPool.setMaxRelativeExpiryMs(poolExpiration / 2));
  try {
    dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
        .setId(listInfo.getId())
        .setPool(destPool.getPoolName())
        .build());
    fail("Modified a directive to a pool with a lower max expiration");
  } catch (InvalidRequestException e) {
    assertExceptionContains("exceeds the max relative expiration", e);
  }
  // Shrinking the expiry at the same time as the pool move succeeds
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(defaultExpiry)
      .setId(listInfo.getId())
      .setPool(destPool.getPoolName())
      .setExpiration(Expiration.newRelative(poolExpiration / 2))
      .build());
  dirIt = dfs.listCacheDirectives(new CacheDirectiveInfo.Builder()
      .setPool(destPool.getPoolName())
      .build());
  listInfo = dirIt.next().getInfo();
  listExpiration = listInfo.getExpiration().getAbsoluteMillis()
      - new Date().getTime();
  assertTrue("Unexpected relative expiry " + listExpiration
      + " expected approximately " + poolExpiration/2,
      Math.abs(poolExpiration/2 - listExpiration) < 10*1000);
  // Test that cache pool and directive expiry can be modified back to never
  dfs.modifyCachePool(destPool
      .setMaxRelativeExpiryMs(CachePoolInfo.RELATIVE_EXPIRY_NEVER));
  poolIt = dfs.listCachePools();
  listPool = poolIt.next().getInfo();
  // Iteration order over pools is unspecified; scan for destPool
  while (!listPool.getPoolName().equals(destPool.getPoolName())) {
    listPool = poolIt.next().getInfo();
  }
  assertEquals("Expected max relative expiry to match set value",
      CachePoolInfo.RELATIVE_EXPIRY_NEVER,
      listPool.getMaxRelativeExpiryMs().longValue());
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(listInfo.getId())
      .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER))
      .build());
  // Test modifying close to the limit
  dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder()
      .setId(listInfo.getId())
      .setExpiration(Expiration.newRelative(RELATIVE_EXPIRY_NEVER - 1))
      .build());
}
} }

View File

@ -13,8 +13,8 @@
<TXID>2</TXID> <TXID>2</TXID>
<DELEGATION_KEY> <DELEGATION_KEY>
<KEY_ID>1</KEY_ID> <KEY_ID>1</KEY_ID>
<EXPIRY_DATE>1387701670577</EXPIRY_DATE> <EXPIRY_DATE>1388171826188</EXPIRY_DATE>
<KEY>7bb5467995769b59</KEY> <KEY>c7d869c22c8afce1</KEY>
</DELEGATION_KEY> </DELEGATION_KEY>
</DATA> </DATA>
</RECORD> </RECORD>
@ -24,8 +24,8 @@
<TXID>3</TXID> <TXID>3</TXID>
<DELEGATION_KEY> <DELEGATION_KEY>
<KEY_ID>2</KEY_ID> <KEY_ID>2</KEY_ID>
<EXPIRY_DATE>1387701670580</EXPIRY_DATE> <EXPIRY_DATE>1388171826191</EXPIRY_DATE>
<KEY>a5a3a2755e36827b</KEY> <KEY>a3c41446507dfca9</KEY>
</DELEGATION_KEY> </DELEGATION_KEY>
</DATA> </DATA>
</RECORD> </RECORD>
@ -37,17 +37,17 @@
<INODEID>16386</INODEID> <INODEID>16386</INODEID>
<PATH>/file_create_u\0001;F431</PATH> <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471220</MTIME> <MTIME>1387480626844</MTIME>
<ATIME>1387010471220</ATIME> <ATIME>1387480626844</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_-52011019_1</CLIENT_NAME> <CLIENT_NAME>DFSClient_NONMAPREDUCE_1147796111_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE> <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS> <PERMISSION_STATUS>
<USERNAME>andrew</USERNAME> <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME> <GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE> <MODE>420</MODE>
</PERMISSION_STATUS> </PERMISSION_STATUS>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>7</RPC_CALLID> <RPC_CALLID>7</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -59,8 +59,8 @@
<INODEID>0</INODEID> <INODEID>0</INODEID>
<PATH>/file_create_u\0001;F431</PATH> <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471276</MTIME> <MTIME>1387480626885</MTIME>
<ATIME>1387010471220</ATIME> <ATIME>1387480626844</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME> <CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE> <CLIENT_MACHINE></CLIENT_MACHINE>
@ -78,8 +78,8 @@
<LENGTH>0</LENGTH> <LENGTH>0</LENGTH>
<SRC>/file_create_u\0001;F431</SRC> <SRC>/file_create_u\0001;F431</SRC>
<DST>/file_moved</DST> <DST>/file_moved</DST>
<TIMESTAMP>1387010471286</TIMESTAMP> <TIMESTAMP>1387480626894</TIMESTAMP>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>9</RPC_CALLID> <RPC_CALLID>9</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -89,8 +89,8 @@
<TXID>7</TXID> <TXID>7</TXID>
<LENGTH>0</LENGTH> <LENGTH>0</LENGTH>
<PATH>/file_moved</PATH> <PATH>/file_moved</PATH>
<TIMESTAMP>1387010471299</TIMESTAMP> <TIMESTAMP>1387480626905</TIMESTAMP>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>10</RPC_CALLID> <RPC_CALLID>10</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -101,7 +101,7 @@
<LENGTH>0</LENGTH> <LENGTH>0</LENGTH>
<INODEID>16387</INODEID> <INODEID>16387</INODEID>
<PATH>/directory_mkdir</PATH> <PATH>/directory_mkdir</PATH>
<TIMESTAMP>1387010471312</TIMESTAMP> <TIMESTAMP>1387480626917</TIMESTAMP>
<PERMISSION_STATUS> <PERMISSION_STATUS>
<USERNAME>andrew</USERNAME> <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME> <GROUPNAME>supergroup</GROUPNAME>
@ -136,7 +136,7 @@
<TXID>12</TXID> <TXID>12</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT> <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot1</SNAPSHOTNAME> <SNAPSHOTNAME>snapshot1</SNAPSHOTNAME>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>15</RPC_CALLID> <RPC_CALLID>15</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -147,7 +147,7 @@
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT> <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME> <SNAPSHOTOLDNAME>snapshot1</SNAPSHOTOLDNAME>
<SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME> <SNAPSHOTNEWNAME>snapshot2</SNAPSHOTNEWNAME>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>16</RPC_CALLID> <RPC_CALLID>16</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -157,7 +157,7 @@
<TXID>14</TXID> <TXID>14</TXID>
<SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT> <SNAPSHOTROOT>/directory_mkdir</SNAPSHOTROOT>
<SNAPSHOTNAME>snapshot2</SNAPSHOTNAME> <SNAPSHOTNAME>snapshot2</SNAPSHOTNAME>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>17</RPC_CALLID> <RPC_CALLID>17</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -169,17 +169,17 @@
<INODEID>16388</INODEID> <INODEID>16388</INODEID>
<PATH>/file_create_u\0001;F431</PATH> <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471373</MTIME> <MTIME>1387480626978</MTIME>
<ATIME>1387010471373</ATIME> <ATIME>1387480626978</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_-52011019_1</CLIENT_NAME> <CLIENT_NAME>DFSClient_NONMAPREDUCE_1147796111_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE> <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS> <PERMISSION_STATUS>
<USERNAME>andrew</USERNAME> <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME> <GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE> <MODE>420</MODE>
</PERMISSION_STATUS> </PERMISSION_STATUS>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>18</RPC_CALLID> <RPC_CALLID>18</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -191,8 +191,8 @@
<INODEID>0</INODEID> <INODEID>0</INODEID>
<PATH>/file_create_u\0001;F431</PATH> <PATH>/file_create_u\0001;F431</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471380</MTIME> <MTIME>1387480626985</MTIME>
<ATIME>1387010471373</ATIME> <ATIME>1387480626978</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME> <CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE> <CLIENT_MACHINE></CLIENT_MACHINE>
@ -253,9 +253,9 @@
<LENGTH>0</LENGTH> <LENGTH>0</LENGTH>
<SRC>/file_create_u\0001;F431</SRC> <SRC>/file_create_u\0001;F431</SRC>
<DST>/file_moved</DST> <DST>/file_moved</DST>
<TIMESTAMP>1387010471428</TIMESTAMP> <TIMESTAMP>1387480627035</TIMESTAMP>
<OPTIONS>NONE</OPTIONS> <OPTIONS>NONE</OPTIONS>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>25</RPC_CALLID> <RPC_CALLID>25</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -267,17 +267,17 @@
<INODEID>16389</INODEID> <INODEID>16389</INODEID>
<PATH>/file_concat_target</PATH> <PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471438</MTIME> <MTIME>1387480627043</MTIME>
<ATIME>1387010471438</ATIME> <ATIME>1387480627043</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_-52011019_1</CLIENT_NAME> <CLIENT_NAME>DFSClient_NONMAPREDUCE_1147796111_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE> <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS> <PERMISSION_STATUS>
<USERNAME>andrew</USERNAME> <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME> <GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE> <MODE>420</MODE>
</PERMISSION_STATUS> </PERMISSION_STATUS>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>27</RPC_CALLID> <RPC_CALLID>27</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -388,8 +388,8 @@
<INODEID>0</INODEID> <INODEID>0</INODEID>
<PATH>/file_concat_target</PATH> <PATH>/file_concat_target</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471540</MTIME> <MTIME>1387480627148</MTIME>
<ATIME>1387010471438</ATIME> <ATIME>1387480627043</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME> <CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE> <CLIENT_MACHINE></CLIENT_MACHINE>
@ -423,17 +423,17 @@
<INODEID>16390</INODEID> <INODEID>16390</INODEID>
<PATH>/file_concat_0</PATH> <PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471547</MTIME> <MTIME>1387480627155</MTIME>
<ATIME>1387010471547</ATIME> <ATIME>1387480627155</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_-52011019_1</CLIENT_NAME> <CLIENT_NAME>DFSClient_NONMAPREDUCE_1147796111_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE> <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS> <PERMISSION_STATUS>
<USERNAME>andrew</USERNAME> <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME> <GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE> <MODE>420</MODE>
</PERMISSION_STATUS> </PERMISSION_STATUS>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>40</RPC_CALLID> <RPC_CALLID>40</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -544,8 +544,8 @@
<INODEID>0</INODEID> <INODEID>0</INODEID>
<PATH>/file_concat_0</PATH> <PATH>/file_concat_0</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471588</MTIME> <MTIME>1387480627193</MTIME>
<ATIME>1387010471547</ATIME> <ATIME>1387480627155</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME> <CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE> <CLIENT_MACHINE></CLIENT_MACHINE>
@ -579,17 +579,17 @@
<INODEID>16391</INODEID> <INODEID>16391</INODEID>
<PATH>/file_concat_1</PATH> <PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471595</MTIME> <MTIME>1387480627200</MTIME>
<ATIME>1387010471595</ATIME> <ATIME>1387480627200</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_-52011019_1</CLIENT_NAME> <CLIENT_NAME>DFSClient_NONMAPREDUCE_1147796111_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE> <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS> <PERMISSION_STATUS>
<USERNAME>andrew</USERNAME> <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME> <GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE> <MODE>420</MODE>
</PERMISSION_STATUS> </PERMISSION_STATUS>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>52</RPC_CALLID> <RPC_CALLID>52</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -700,8 +700,8 @@
<INODEID>0</INODEID> <INODEID>0</INODEID>
<PATH>/file_concat_1</PATH> <PATH>/file_concat_1</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471651</MTIME> <MTIME>1387480627238</MTIME>
<ATIME>1387010471595</ATIME> <ATIME>1387480627200</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME> <CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE> <CLIENT_MACHINE></CLIENT_MACHINE>
@ -733,12 +733,12 @@
<TXID>56</TXID> <TXID>56</TXID>
<LENGTH>0</LENGTH> <LENGTH>0</LENGTH>
<TRG>/file_concat_target</TRG> <TRG>/file_concat_target</TRG>
<TIMESTAMP>1387010471663</TIMESTAMP> <TIMESTAMP>1387480627246</TIMESTAMP>
<SOURCES> <SOURCES>
<SOURCE1>/file_concat_0</SOURCE1> <SOURCE1>/file_concat_0</SOURCE1>
<SOURCE2>/file_concat_1</SOURCE2> <SOURCE2>/file_concat_1</SOURCE2>
</SOURCES> </SOURCES>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>63</RPC_CALLID> <RPC_CALLID>63</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -750,14 +750,14 @@
<INODEID>16392</INODEID> <INODEID>16392</INODEID>
<PATH>/file_symlink</PATH> <PATH>/file_symlink</PATH>
<VALUE>/file_concat_target</VALUE> <VALUE>/file_concat_target</VALUE>
<MTIME>1387010471674</MTIME> <MTIME>1387480627255</MTIME>
<ATIME>1387010471674</ATIME> <ATIME>1387480627255</ATIME>
<PERMISSION_STATUS> <PERMISSION_STATUS>
<USERNAME>andrew</USERNAME> <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME> <GROUPNAME>supergroup</GROUPNAME>
<MODE>511</MODE> <MODE>511</MODE>
</PERMISSION_STATUS> </PERMISSION_STATUS>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>64</RPC_CALLID> <RPC_CALLID>64</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -771,11 +771,11 @@
<OWNER>andrew</OWNER> <OWNER>andrew</OWNER>
<RENEWER>JobTracker</RENEWER> <RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER> <REALUSER></REALUSER>
<ISSUE_DATE>1387010471682</ISSUE_DATE> <ISSUE_DATE>1387480627262</ISSUE_DATE>
<MAX_DATE>1387615271682</MAX_DATE> <MAX_DATE>1388085427262</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID> <MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER> </DELEGATION_TOKEN_IDENTIFIER>
<EXPIRY_TIME>1387096871682</EXPIRY_TIME> <EXPIRY_TIME>1387567027262</EXPIRY_TIME>
</DATA> </DATA>
</RECORD> </RECORD>
<RECORD> <RECORD>
@ -788,11 +788,11 @@
<OWNER>andrew</OWNER> <OWNER>andrew</OWNER>
<RENEWER>JobTracker</RENEWER> <RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER> <REALUSER></REALUSER>
<ISSUE_DATE>1387010471682</ISSUE_DATE> <ISSUE_DATE>1387480627262</ISSUE_DATE>
<MAX_DATE>1387615271682</MAX_DATE> <MAX_DATE>1388085427262</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID> <MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER> </DELEGATION_TOKEN_IDENTIFIER>
<EXPIRY_TIME>1387096871717</EXPIRY_TIME> <EXPIRY_TIME>1387567027281</EXPIRY_TIME>
</DATA> </DATA>
</RECORD> </RECORD>
<RECORD> <RECORD>
@ -805,8 +805,8 @@
<OWNER>andrew</OWNER> <OWNER>andrew</OWNER>
<RENEWER>JobTracker</RENEWER> <RENEWER>JobTracker</RENEWER>
<REALUSER></REALUSER> <REALUSER></REALUSER>
<ISSUE_DATE>1387010471682</ISSUE_DATE> <ISSUE_DATE>1387480627262</ISSUE_DATE>
<MAX_DATE>1387615271682</MAX_DATE> <MAX_DATE>1388085427262</MAX_DATE>
<MASTER_KEY_ID>2</MASTER_KEY_ID> <MASTER_KEY_ID>2</MASTER_KEY_ID>
</DELEGATION_TOKEN_IDENTIFIER> </DELEGATION_TOKEN_IDENTIFIER>
</DATA> </DATA>
@ -820,7 +820,8 @@
<GROUPNAME>andrew</GROUPNAME> <GROUPNAME>andrew</GROUPNAME>
<MODE>493</MODE> <MODE>493</MODE>
<LIMIT>9223372036854775807</LIMIT> <LIMIT>9223372036854775807</LIMIT>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <MAXRELATIVEEXPIRY>2305843009213693951</MAXRELATIVEEXPIRY>
<RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>68</RPC_CALLID> <RPC_CALLID>68</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -833,7 +834,7 @@
<GROUPNAME>party</GROUPNAME> <GROUPNAME>party</GROUPNAME>
<MODE>448</MODE> <MODE>448</MODE>
<LIMIT>1989</LIMIT> <LIMIT>1989</LIMIT>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>69</RPC_CALLID> <RPC_CALLID>69</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -845,8 +846,8 @@
<PATH>/bar</PATH> <PATH>/bar</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<POOL>poolparty</POOL> <POOL>poolparty</POOL>
<EXPIRATION>-1</EXPIRATION> <EXPIRATION>2305844396694321272</EXPIRATION>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>70</RPC_CALLID> <RPC_CALLID>70</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -856,7 +857,7 @@
<TXID>64</TXID> <TXID>64</TXID>
<ID>1</ID> <ID>1</ID>
<PATH>/bar2</PATH> <PATH>/bar2</PATH>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>71</RPC_CALLID> <RPC_CALLID>71</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -865,7 +866,7 @@
<DATA> <DATA>
<TXID>65</TXID> <TXID>65</TXID>
<ID>1</ID> <ID>1</ID>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>72</RPC_CALLID> <RPC_CALLID>72</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -874,7 +875,7 @@
<DATA> <DATA>
<TXID>66</TXID> <TXID>66</TXID>
<POOLNAME>poolparty</POOLNAME> <POOLNAME>poolparty</POOLNAME>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>73</RPC_CALLID> <RPC_CALLID>73</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -886,17 +887,17 @@
<INODEID>16393</INODEID> <INODEID>16393</INODEID>
<PATH>/hard-lease-recovery-test</PATH> <PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010471802</MTIME> <MTIME>1387480627356</MTIME>
<ATIME>1387010471802</ATIME> <ATIME>1387480627356</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME>DFSClient_NONMAPREDUCE_-52011019_1</CLIENT_NAME> <CLIENT_NAME>DFSClient_NONMAPREDUCE_1147796111_1</CLIENT_NAME>
<CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE> <CLIENT_MACHINE>127.0.0.1</CLIENT_MACHINE>
<PERMISSION_STATUS> <PERMISSION_STATUS>
<USERNAME>andrew</USERNAME> <USERNAME>andrew</USERNAME>
<GROUPNAME>supergroup</GROUPNAME> <GROUPNAME>supergroup</GROUPNAME>
<MODE>420</MODE> <MODE>420</MODE>
</PERMISSION_STATUS> </PERMISSION_STATUS>
<RPC_CLIENTID>508263bb-692e-4439-8738-ff89b8b03923</RPC_CLIENTID> <RPC_CLIENTID>a90261a0-3759-4480-ba80-e10c9ae331e6</RPC_CLIENTID>
<RPC_CALLID>74</RPC_CALLID> <RPC_CALLID>74</RPC_CALLID>
</DATA> </DATA>
</RECORD> </RECORD>
@ -953,7 +954,7 @@
<OPCODE>OP_REASSIGN_LEASE</OPCODE> <OPCODE>OP_REASSIGN_LEASE</OPCODE>
<DATA> <DATA>
<TXID>73</TXID> <TXID>73</TXID>
<LEASEHOLDER>DFSClient_NONMAPREDUCE_-52011019_1</LEASEHOLDER> <LEASEHOLDER>DFSClient_NONMAPREDUCE_1147796111_1</LEASEHOLDER>
<PATH>/hard-lease-recovery-test</PATH> <PATH>/hard-lease-recovery-test</PATH>
<NEWHOLDER>HDFS_NameNode</NEWHOLDER> <NEWHOLDER>HDFS_NameNode</NEWHOLDER>
</DATA> </DATA>
@ -966,8 +967,8 @@
<INODEID>0</INODEID> <INODEID>0</INODEID>
<PATH>/hard-lease-recovery-test</PATH> <PATH>/hard-lease-recovery-test</PATH>
<REPLICATION>1</REPLICATION> <REPLICATION>1</REPLICATION>
<MTIME>1387010474126</MTIME> <MTIME>1387480629729</MTIME>
<ATIME>1387010471802</ATIME> <ATIME>1387480627356</ATIME>
<BLOCKSIZE>512</BLOCKSIZE> <BLOCKSIZE>512</BLOCKSIZE>
<CLIENT_NAME></CLIENT_NAME> <CLIENT_NAME></CLIENT_NAME>
<CLIENT_MACHINE></CLIENT_MACHINE> <CLIENT_MACHINE></CLIENT_MACHINE>

View File

@ -417,11 +417,11 @@
</comparator> </comparator>
<comparator> <comparator>
<type>SubstringComparator</type> <type>SubstringComparator</type>
<expected-output>bar alice alicegroup rwxr-xr-x unlimited 0 0 0 0 0</expected-output> <expected-output>bar alice alicegroup rwxr-xr-x unlimited never 0 0 0 0 0</expected-output>
</comparator> </comparator>
<comparator> <comparator>
<type>SubstringComparator</type> <type>SubstringComparator</type>
<expected-output>foo bob bob rw-rw-r-- unlimited 0 0 0 0 0</expected-output> <expected-output>foo bob bob rw-rw-r-- unlimited never 0 0 0 0 0</expected-output>
</comparator> </comparator>
</comparators> </comparators>
</test> </test>
@ -457,5 +457,37 @@
</comparator> </comparator>
</comparators> </comparators>
</test> </test>
<test> <!--Tested -->
<description>Testing pool max ttl settings</description>
<test-commands>
<cache-admin-command>-addPool pool1 -owner andrew -group andrew</cache-admin-command>
<cache-admin-command>-addPool pool2 -owner andrew -group andrew -maxTtl 999d</cache-admin-command>
<cache-admin-command>-modifyPool pool2 -maxTtl never</cache-admin-command>
<cache-admin-command>-addPool pool3 -owner andrew -group andrew -maxTtl 4h</cache-admin-command>
<cache-admin-command>-listPools</cache-admin-command>
</test-commands>
<cleanup-commands>
<cache-admin-command>-removePool pool1</cache-admin-command>
</cleanup-commands>
<comparators>
<comparator>
<type>SubstringComparator</type>
<expected-output>Found 3 results</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>pool1 andrew andrew rwxr-xr-x unlimited never</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>pool2 andrew andrew rwxr-xr-x unlimited never</expected-output>
</comparator>
<comparator>
<type>SubstringComparator</type>
<expected-output>pool3 andrew andrew rwxr-xr-x unlimited 000:04:00:00.000</expected-output>
</comparator>
</comparators>
</test>
</tests> </tests>
</configuration> </configuration>