commit f41f8b8842 (parent d56d0b46e1)
Author: Colin McCabe
Date:   2013-09-06 18:52:50 +00:00

    HDFS-5163. Miscellaneous cache pool RPC fixes (Contributed by Colin Patrick McCabe)

    git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1520665 13f79535-47bb-0310-9956-ffa450edef68

19 changed files with 788 additions and 797 deletions

View File

@@ -28,13 +28,16 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
   public interface BatchedEntries<E> {
     public E get(int i);
     public int size();
+    public boolean hasMore();
   }

   public static class BatchedListEntries<E> implements BatchedEntries<E> {
     private final List<E> entries;
+    private final boolean hasMore;

-    public BatchedListEntries(List<E> entries) {
+    public BatchedListEntries(List<E> entries, boolean hasMore) {
       this.entries = entries;
+      this.hasMore = hasMore;
     }

     public E get(int i) {
@@ -44,16 +47,18 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
     public int size() {
       return entries.size();
     }
+
+    public boolean hasMore() {
+      return hasMore;
+    }
   }

   private K prevKey;
-  private final int maxRepliesPerRequest;
   private BatchedEntries<E> entries;
   private int idx;

-  public BatchedRemoteIterator(K prevKey, int maxRepliesPerRequest) {
+  public BatchedRemoteIterator(K prevKey) {
     this.prevKey = prevKey;
-    this.maxRepliesPerRequest = maxRepliesPerRequest;
     this.entries = null;
     this.idx = -1;
   }
@@ -62,21 +67,14 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
    * Perform the actual remote request.
    *
    * @param key The key to send.
-   * @param maxRepliesPerRequest The maximum number of replies to allow.
    * @return A list of replies.
    */
-  public abstract BatchedEntries<E> makeRequest(K prevKey,
-      int maxRepliesPerRequest) throws IOException;
+  public abstract BatchedEntries<E> makeRequest(K prevKey) throws IOException;

   private void makeRequest() throws IOException {
     idx = 0;
     entries = null;
-    entries = makeRequest(prevKey, maxRepliesPerRequest);
-    if (entries.size() > maxRepliesPerRequest) {
-      throw new IOException("invalid number of replies returned: got " +
-          entries.size() + ", expected " + maxRepliesPerRequest +
-          " at most.");
-    }
+    entries = makeRequest(prevKey);
     if (entries.size() == 0) {
       entries = null;
     }
@@ -86,7 +84,7 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
     if (idx == -1) {
       makeRequest();
     } else if ((entries != null) && (idx >= entries.size())) {
-      if (entries.size() < maxRepliesPerRequest) {
+      if (!entries.hasMore()) {
         // Last time, we got fewer entries than requested.
         // So we should be at the end.
         entries = null;
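Illustrative sketch (not part of this change): the iterator now trusts the server-reported hasMore() flag instead of inferring the end of the listing from a short batch. Below is a minimal, hypothetical subclass written against the new signatures; the ListingIterator name, the in-memory backing list, and the batch size are made up, and BatchedRemoteIterator with its nested BatchedEntries/BatchedListEntries types are assumed to be imported.

import java.io.IOException;
import java.util.List;

// Hypothetical subclass paging through an in-memory list with the new API.
class ListingIterator extends BatchedRemoteIterator<Long, String> {
  private final List<String> all;      // stands in for the remote store
  private static final int BATCH = 3;  // arbitrary batch size for the sketch

  ListingIterator(List<String> all) {
    super(-1L);                        // prevKey of -1 means "start at the beginning"
    this.all = all;
  }

  @Override
  public BatchedEntries<String> makeRequest(Long prevKey) throws IOException {
    int start = prevKey.intValue() + 1;
    int end = Math.min(start + BATCH, all.size());
    // The boolean tells the base class whether another round trip is needed.
    return new BatchedListEntries<String>(all.subList(start, end),
        end < all.size());
  }

  @Override
  public Long elementToPrevKey(String element) {
    return (long) all.indexOf(element); // key of the last element returned
  }
}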

View File

@@ -303,6 +303,13 @@ public class FsPermission implements Writable {
     return new FsPermission((short)00666);
   }

+  /**
+   * Get the default permission for cache pools.
+   */
+  public static FsPermission getCachePoolDefault() {
+    return new FsPermission((short)00755);
+  }
+
   /**
    * Create a FsPermission from a Unix symbolic permission string
    * @param unixSymbolicPermission e.g. "-rw-rw-rw-"

View File

@@ -1253,6 +1253,14 @@ public class UserGroupInformation {
     return null;
   }

+  public String getPrimaryGroupName() throws IOException {
+    String[] groups = getGroupNames();
+    if (groups.length == 0) {
+      throw new IOException("There is no primary group for UGI " + this);
+    }
+    return groups[0];
+  }
+
   /**
    * Get the user's full principal name.
    * @return the user's full principal name.
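Illustrative usage (not part of the patch): the primary group is simply the first entry returned by getGroupNames().

UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
String primaryGroup = ugi.getPrimaryGroupName(); // throws IOException if the user has no groups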

View File

@@ -24,6 +24,9 @@ HDFS-4949 (Unreleased)
     HDFS-5121. Add RPCs for creating and manipulating cache pools.
     (Contributed by Colin Patrick McCabe)

+    HDFS-5163. Miscellaneous cache pool RPC fixes. (Contributed by Colin
+    Patrick McCabe)
+
   OPTIMIZATIONS

View File

@@ -195,6 +195,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
   public static final int     DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;

+  public static final String  DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES =
+      "dfs.namenode.list.cache.pools.num.responses";
+  public static final int     DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT = 100;
+  public static final String  DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES =
+      "dfs.namenode.list.cache.directives.num.responses";
+  public static final int     DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT = 100;
+
   // Whether to enable datanode's stale state detection and usage for reads
   public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
   public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
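For illustration only (not part of the patch): the new keys control the batch size of the listing RPCs and would typically be tuned through a Configuration. The values below are arbitrary.

Configuration conf = new HdfsConfiguration();
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 50);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 50);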

View File

@@ -56,12 +56,12 @@ public abstract class AddPathCacheDirectiveException extends IOException {
     }
   }

-  public static class InvalidPoolError
+  public static class InvalidPoolNameError
       extends AddPathCacheDirectiveException {
     private static final long serialVersionUID = 1L;

-    public InvalidPoolError(PathCacheDirective directive) {
-      super("invalid pool id " + directive.getPoolId(), directive);
+    public InvalidPoolNameError(PathCacheDirective directive) {
+      super("invalid pool name '" + directive.getPool() + "'", directive);
     }
   }
@@ -70,7 +70,7 @@ public abstract class AddPathCacheDirectiveException extends IOException {
     private static final long serialVersionUID = 1L;

     public PoolWritePermissionDeniedError(PathCacheDirective directive) {
-      super("write permission denied for pool id " + directive.getPoolId(),
+      super("write permission denied for pool '" + directive.getPool() + "'",
           directive);
     }
   }
@@ -82,9 +82,7 @@ public abstract class AddPathCacheDirectiveException extends IOException {
     public UnexpectedAddPathCacheDirectiveException(
         PathCacheDirective directive) {
       super("encountered an unexpected error when trying to " +
-          "add path cache directive to pool id " + directive.getPoolId() +
-          " " + directive,
-          directive);
+          "add path cache directive " + directive, directive);
     }
   }
 };

View File

@@ -18,45 +18,38 @@
 package org.apache.hadoop.hdfs.protocol;

+import javax.annotation.Nullable;
+
 import org.apache.commons.lang.builder.EqualsBuilder;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;

-import com.google.common.base.Preconditions;
-
 /**
  * Information about a cache pool.
+ *
+ * CachePoolInfo permissions roughly map to Unix file permissions.
+ * Write permissions allow addition and removal of a {@link PathCacheEntry} from
+ * the pool. Execute permissions allow listing of PathCacheEntries in a pool.
+ * Read permissions have no associated meaning.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class CachePoolInfo {
+  final String poolName;

-  private String poolName;
-  private String ownerName;
-  private String groupName;
-  private FsPermission mode;
-  private Integer weight;
+  @Nullable
+  String ownerName;

-  /**
-   * For Builder use
-   */
-  private CachePoolInfo() {}
+  @Nullable
+  String groupName;

-  /**
-   * Use a CachePoolInfo {@link Builder} to create a new CachePoolInfo with
-   * more parameters
-   */
+  @Nullable
+  FsPermission mode;
+
+  @Nullable
+  Integer weight;
+
   public CachePoolInfo(String poolName) {
     this.poolName = poolName;
   }

   public String getPoolName() {
     return poolName;
   }
@@ -65,103 +58,73 @@ public class CachePoolInfo {
     return ownerName;
   }

+  public CachePoolInfo setOwnerName(String ownerName) {
+    this.ownerName = ownerName;
+    return this;
+  }
+
   public String getGroupName() {
     return groupName;
   }

+  public CachePoolInfo setGroupName(String groupName) {
+    this.groupName = groupName;
+    return this;
+  }
+
   public FsPermission getMode() {
     return mode;
   }

+  public CachePoolInfo setMode(FsPermission mode) {
+    this.mode = mode;
+    return this;
+  }
+
   public Integer getWeight() {
     return weight;
   }

+  public CachePoolInfo setWeight(Integer weight) {
+    this.weight = weight;
+    return this;
+  }
+
   public String toString() {
-    return new StringBuilder().
-        append("{ ").append("poolName:").append(poolName).
-        append(", ownerName:").append(ownerName).
-        append(", groupName:").append(groupName).
-        append(", mode:").append(mode).
-        append(", weight:").append(weight).
-        append(" }").toString();
+    return new StringBuilder().append("{").
+      append("poolName:").append(poolName).
+      append(", ownerName:").append(ownerName).
+      append(", groupName:").append(groupName).
+      append(", mode:").append((mode == null) ? "null" :
+          String.format("0%03o", mode)).
+      append(", weight:").append(weight).
+      append("}").toString();
+  }
+
+  @Override
+  public boolean equals(Object o) {
+    try {
+      CachePoolInfo other = (CachePoolInfo)o;
+      return new EqualsBuilder().
+          append(poolName, other.poolName).
+          append(ownerName, other.ownerName).
+          append(groupName, other.groupName).
+          append(mode, other.mode).
+          append(weight, other.weight).
+          isEquals();
+    } catch (ClassCastException e) {
+      return false;
+    }
   }

   @Override
   public int hashCode() {
-    return new HashCodeBuilder().append(poolName).append(ownerName)
-        .append(groupName).append(mode.toShort()).append(weight).hashCode();
-  }
-
-  @Override
-  public boolean equals(Object obj) {
-    if (obj == null) { return false; }
-    if (obj == this) { return true; }
-    if (obj.getClass() != getClass()) {
-      return false;
-    }
-    CachePoolInfo rhs = (CachePoolInfo)obj;
-    return new EqualsBuilder()
-      .append(poolName, rhs.poolName)
-      .append(ownerName, rhs.ownerName)
-      .append(groupName, rhs.groupName)
-      .append(mode, rhs.mode)
-      .append(weight, rhs.weight)
-      .isEquals();
-  }
-
-  public static Builder newBuilder() {
-    return new Builder();
-  }
-
-  public static Builder newBuilder(CachePoolInfo info) {
-    return new Builder(info);
-  }
-
-  /**
-   * CachePoolInfo Builder
-   */
-  public static class Builder {
-    private CachePoolInfo info;
-
-    public Builder() {
-      this.info = new CachePoolInfo();
-    }
-
-    public Builder(CachePoolInfo info) {
-      this.info = info;
-    }
-
-    public CachePoolInfo build() {
-      Preconditions.checkNotNull(info.poolName,
-          "Cannot create a CachePoolInfo without a pool name");
-      return info;
-    }
-
-    public Builder setPoolName(String poolName) {
-      info.poolName = poolName;
-      return this;
-    }
-
-    public Builder setOwnerName(String ownerName) {
-      info.ownerName = ownerName;
-      return this;
-    }
-
-    public Builder setGroupName(String groupName) {
-      info.groupName = groupName;
-      return this;
-    }
-
-    public Builder setMode(FsPermission mode) {
-      info.mode = mode;
-      return this;
-    }
-
-    public Builder setWeight(Integer weight) {
-      info.weight = weight;
-      return this;
-    }
-  }
+    return new HashCodeBuilder().
+        append(poolName).
+        append(ownerName).
+        append(groupName).
+        append(mode).
+        append(weight).
+        hashCode();
+  }
 }
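Illustrative sketch (not part of this change): with the Builder gone, callers construct pool metadata through the chained setters. The pool name, owner, group, and values below are made up.

CachePoolInfo info = new CachePoolInfo("research")
    .setOwnerName("alice")
    .setGroupName("staff")
    .setMode(new FsPermission((short)0755))
    .setWeight(100);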

View File

@@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
-import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -1107,8 +1106,9 @@
    * could not be added.
    */
   @AtMostOnce
-  public List<Fallible<PathCacheEntry>> addPathCacheDirectives(
-      List<PathCacheDirective> directives) throws IOException;
+  public List<Fallible<PathCacheEntry>>
+      addPathCacheDirectives(List<PathCacheDirective> directives)
+      throws IOException;

   /**
    * Remove some path cache entries from the CacheManager.
@@ -1117,7 +1117,7 @@
    * @return A Fallible list where each element is either a successfully removed
    * ID, or an IOException describing why the ID could not be removed.
    */
-  @Idempotent
+  @AtMostOnce
   public List<Fallible<Long>> removePathCacheEntries(List<Long> ids)
       throws IOException;
@@ -1127,15 +1127,13 @@
    *
    * @param prevId The last listed entry ID, or -1 if this is the first call to
    *               listPathCacheEntries.
-   * @param pool The cache pool to list, or -1 to list all pools
-   * @param maxRepliesPerRequest The maximum number of entries to return per
-   *          request
+   * @param pool The cache pool to list, or the empty string to list all pools
    * @return A RemoteIterator which returns PathCacheEntry objects.
    */
   @Idempotent
   public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
-      long poolId, int maxRepliesPerRequest) throws IOException;
+      String pool) throws IOException;

   /**
    * Add a new cache pool.
    *
@@ -1143,39 +1141,37 @@
    * @throws IOException If the request could not be completed.
    */
   @AtMostOnce
-  public CachePool addCachePool(CachePoolInfo info) throws IOException;
+  public void addCachePool(CachePoolInfo info) throws IOException;

   /**
-   * Modify a cache pool, e.g. pool name, permissions, owner, group.
+   * Modify a cache pool.
    *
-   * @param poolId ID of the cache pool to modify
-   * @param info New metadata for the cache pool
-   * @throws IOException If the request could not be completed.
+   * @param req
+   *          The request to modify a cache pool.
+   * @throws IOException
+   *          If the request could not be completed.
    */
   @AtMostOnce
-  public void modifyCachePool(long poolId, CachePoolInfo info)
-      throws IOException;
+  public void modifyCachePool(CachePoolInfo req) throws IOException;

   /**
    * Remove a cache pool.
    *
-   * @param poolId ID of the cache pool to remove.
+   * @param pool name of the cache pool to remove.
    * @throws IOException if the cache pool did not exist, or could not be
    *           removed.
    */
-  @Idempotent
-  public void removeCachePool(long poolId) throws IOException;
+  @AtMostOnce
+  public void removeCachePool(String pool) throws IOException;

   /**
    * List the set of cache pools. Incrementally fetches results from the server.
    *
-   * @param prevPoolId ID of the last pool listed, or -1 if this is the first
-   *          invocation of listCachePools
-   * @param maxRepliesPerRequest Maximum number of cache pools to return per
-   *          server request.
+   * @param prevPool name of the last pool listed, or the empty string if this is
+   *          the first invocation of listCachePools
    * @return A RemoteIterator which returns CachePool objects.
    */
   @Idempotent
-  public RemoteIterator<CachePool> listCachePools(long prevPoolId,
-      int maxRepliesPerRequest) throws IOException;
+  public RemoteIterator<CachePoolInfo> listCachePools(String prevPool)
+      throws IOException;
 }
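A sketch of how a caller drives the revised pool RPCs (not from this commit): 'namenode' is assumed to be an existing ClientProtocol proxy, and the pool name is made up. Per the javadoc above, listCachePools starts from the empty string on the first call.

namenode.addCachePool(new CachePoolInfo("research").setWeight(100));
RemoteIterator<CachePoolInfo> pools = namenode.listCachePools("");
while (pools.hasNext()) {
  System.out.println(pools.next().getPoolName());
}
namenode.removeCachePool("research");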

View File

@@ -25,7 +25,7 @@ import com.google.common.collect.ComparisonChain;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
-import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
+import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;

 /**
@@ -33,13 +33,14 @@
  */
 public class PathCacheDirective implements Comparable<PathCacheDirective> {
   private final String path;
-  private final long poolId;

-  public PathCacheDirective(String path, long poolId) {
+  private final String pool;
+
+  public PathCacheDirective(String path, String pool) {
     Preconditions.checkNotNull(path);
-    Preconditions.checkArgument(poolId > 0);
+    Preconditions.checkNotNull(pool);
     this.path = path;
-    this.poolId = poolId;
+    this.pool = pool;
   }

   /**
@@ -52,8 +53,8 @@ public class PathCacheDirective implements Comparable<PathCacheDirective> {
   /**
    * @return The pool used in this request.
    */
-  public long getPoolId() {
-    return poolId;
+  public String getPool() {
+    return pool;
   }

   /**
@@ -69,22 +70,22 @@ public class PathCacheDirective implements Comparable<PathCacheDirective> {
     if (!DFSUtil.isValidName(path)) {
       throw new InvalidPathNameError(this);
     }
-    if (poolId <= 0) {
-      throw new InvalidPoolError(this);
+    if (pool.isEmpty()) {
+      throw new InvalidPoolNameError(this);
     }
   }

   @Override
   public int compareTo(PathCacheDirective rhs) {
     return ComparisonChain.start().
-        compare(poolId, rhs.getPoolId()).
+        compare(pool, rhs.getPool()).
         compare(path, rhs.getPath()).
         result();
   }

   @Override
   public int hashCode() {
-    return new HashCodeBuilder().append(path).append(poolId).hashCode();
+    return new HashCodeBuilder().append(path).append(pool).hashCode();
   }

   @Override
@@ -101,7 +102,7 @@ public class PathCacheDirective implements Comparable<PathCacheDirective> {
   public String toString() {
     StringBuilder builder = new StringBuilder();
     builder.append("{ path:").append(path).
-        append(", poolId:").append(poolId).
+        append(", pool:").append(pool).
         append(" }");
     return builder.toString();
   }

View File

@@ -27,9 +27,11 @@ import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
-import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
+import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -112,6 +114,7 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -171,6 +174,7 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
+import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
@@ -1035,19 +1039,16 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }

   @Override
-  public AddPathCacheDirectivesResponseProto addPathCacheDirectives(
-      RpcController controller, AddPathCacheDirectivesRequestProto request)
-      throws ServiceException {
+  public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController controller,
+      AddPathCacheDirectivesRequestProto request) throws ServiceException {
     try {
       ArrayList<PathCacheDirective> input =
           new ArrayList<PathCacheDirective>(request.getElementsCount());
       for (int i = 0; i < request.getElementsCount(); i++) {
         PathCacheDirectiveProto proto = request.getElements(i);
-        input.add(new PathCacheDirective(proto.getPath(),
-            proto.getPool().getId()));
+        input.add(new PathCacheDirective(proto.getPath(), proto.getPool()));
       }
-      List<Fallible<PathCacheEntry>> output = server
-          .addPathCacheDirectives(input);
+      List<Fallible<PathCacheEntry>> output = server.addPathCacheDirectives(input);
       AddPathCacheDirectivesResponseProto.Builder builder =
           AddPathCacheDirectivesResponseProto.newBuilder();
       for (int idx = 0; idx < output.size(); idx++) {
@@ -1060,7 +1061,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         } catch (InvalidPathNameError ioe) {
           builder.addResults(AddPathCacheDirectiveErrorProto.
               INVALID_PATH_NAME_ERROR_VALUE);
-        } catch (InvalidPoolError ioe) {
+        } catch (InvalidPoolNameError ioe) {
           builder.addResults(AddPathCacheDirectiveErrorProto.
               INVALID_POOL_NAME_ERROR_VALUE);
         } catch (IOException ioe) {
@@ -1108,21 +1109,20 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }

   @Override
-  public ListPathCacheEntriesResponseProto listPathCacheEntries(
-      RpcController controller, ListPathCacheEntriesRequestProto request)
-      throws ServiceException {
+  public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController controller,
+      ListPathCacheEntriesRequestProto request) throws ServiceException {
     try {
-      CachePool pool = PBHelper.convert(request.getPool());
       RemoteIterator<PathCacheEntry> iter =
-          server.listPathCacheEntries(
-              PBHelper.convert(request.getPrevEntry()).getEntryId(),
-              pool.getId(),
-              request.getMaxReplies());
+          server.listPathCacheEntries(request.getPrevId(), request.getPool());
       ListPathCacheEntriesResponseProto.Builder builder =
           ListPathCacheEntriesResponseProto.newBuilder();
       while (iter.hasNext()) {
         PathCacheEntry entry = iter.next();
-        builder.addEntries(PBHelper.convert(entry));
+        builder.addElements(
+            ListPathCacheEntriesElementProto.newBuilder().
+                setId(entry.getEntryId()).
+                setPath(entry.getDirective().getPath()).
+                setPool(entry.getDirective().getPool()));
       }
       return builder.build();
     } catch (IOException e) {
@@ -1134,20 +1134,46 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public AddCachePoolResponseProto addCachePool(RpcController controller,
       AddCachePoolRequestProto request) throws ServiceException {
     try {
-      server.addCachePool(PBHelper.convert(request.getInfo()));
+      CachePoolInfo info =
+          new CachePoolInfo(request.getPoolName());
+      if (request.hasOwnerName()) {
+        info.setOwnerName(request.getOwnerName());
+      }
+      if (request.hasGroupName()) {
+        info.setGroupName(request.getGroupName());
+      }
+      if (request.hasMode()) {
+        info.setMode(new FsPermission((short)request.getMode()));
+      }
+      if (request.hasWeight()) {
+        info.setWeight(request.getWeight());
+      }
+      server.addCachePool(info);
       return AddCachePoolResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }

   @Override
   public ModifyCachePoolResponseProto modifyCachePool(RpcController controller,
       ModifyCachePoolRequestProto request) throws ServiceException {
     try {
-      server.modifyCachePool(
-          PBHelper.convert(request.getPool()).getId(),
-          PBHelper.convert(request.getInfo()));
+      CachePoolInfo info =
+          new CachePoolInfo(request.getPoolName());
+      if (request.hasOwnerName()) {
+        info.setOwnerName(request.getOwnerName());
+      }
+      if (request.hasGroupName()) {
+        info.setGroupName(request.getGroupName());
+      }
+      if (request.hasMode()) {
+        info.setMode(new FsPermission((short)request.getMode()));
+      }
+      if (request.hasWeight()) {
+        info.setWeight(request.getWeight());
+      }
+      server.modifyCachePool(info);
       return ModifyCachePoolResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1158,7 +1184,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public RemoveCachePoolResponseProto removeCachePool(RpcController controller,
       RemoveCachePoolRequestProto request) throws ServiceException {
     try {
-      server.removeCachePool(PBHelper.convert(request.getPool()).getId());
+      server.removeCachePool(request.getPoolName());
       return RemoveCachePoolResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1169,16 +1195,27 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public ListCachePoolsResponseProto listCachePools(RpcController controller,
       ListCachePoolsRequestProto request) throws ServiceException {
     try {
-      RemoteIterator<CachePool> iter =
-          server.listCachePools(PBHelper.convert(request.getPrevPool()).getId(),
-              request.getMaxReplies());
+      RemoteIterator<CachePoolInfo> iter =
+          server.listCachePools(request.getPrevPoolName());
       ListCachePoolsResponseProto.Builder responseBuilder =
           ListCachePoolsResponseProto.newBuilder();
       while (iter.hasNext()) {
-        CachePool pool = iter.next();
+        CachePoolInfo pool = iter.next();
         ListCachePoolsResponseElementProto.Builder elemBuilder =
             ListCachePoolsResponseElementProto.newBuilder();
-        elemBuilder.setPool(PBHelper.convert(pool));
+        elemBuilder.setPoolName(pool.getPoolName());
+        if (pool.getOwnerName() != null) {
+          elemBuilder.setOwnerName(pool.getOwnerName());
+        }
+        if (pool.getGroupName() != null) {
+          elemBuilder.setGroupName(pool.getGroupName());
+        }
+        if (pool.getMode() != null) {
+          elemBuilder.setMode(pool.getMode().toShort());
+        }
+        if (pool.getWeight() != null) {
+          elemBuilder.setWeight(pool.getWeight());
+        }
         responseBuilder.addElements(elemBuilder.build());
       }
       return responseBuilder.build();
View File

@@ -23,6 +23,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.NoSuchElementException;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -37,12 +38,17 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
+import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
-import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
+import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
+import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
+import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
+import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -55,18 +61,14 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
-import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
-import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto;
@@ -107,23 +109,23 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@@ -144,7 +146,6 @@
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -1026,7 +1027,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       return new InvalidPathNameError(directive);
     } else if (code == AddPathCacheDirectiveErrorProto.
         INVALID_POOL_NAME_ERROR_VALUE) {
-      return new InvalidPoolError(directive);
+      return new InvalidPoolNameError(directive);
     } else {
       return new UnexpectedAddPathCacheDirectiveException(directive);
     }
@@ -1041,7 +1042,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     for (PathCacheDirective directive : directives) {
       builder.addElements(PathCacheDirectiveProto.newBuilder().
           setPath(directive.getPath()).
-          setPool(PBHelper.convert(new CachePool(directive.getPoolId()))).
+          setPool(directive.getPool()).
           build());
     }
     AddPathCacheDirectivesResponseProto result =
@@ -1120,40 +1121,45 @@ public class ClientNamenodeProtocolTranslatorPB implements
     @Override
     public PathCacheEntry get(int i) {
-      PathCacheEntryProto entryProto = response.getEntries(i);
-      return PBHelper.convert(entryProto);
+      ListPathCacheEntriesElementProto elementProto =
+          response.getElements(i);
+      return new PathCacheEntry(elementProto.getId(),
+          new PathCacheDirective(elementProto.getPath(),
+              elementProto.getPool()));
     }

     @Override
     public int size() {
-      return response.getEntriesCount();
+      return response.getElementsCount();
+    }
+
+    @Override
+    public boolean hasMore() {
+      return response.getHasMore();
     }
   }

   private class PathCacheEntriesIterator
       extends BatchedRemoteIterator<Long, PathCacheEntry> {
-    private final long poolId;
+    private final String pool;

-    public PathCacheEntriesIterator(long prevKey, int maxRepliesPerRequest,
-        long poolId) {
-      super(prevKey, maxRepliesPerRequest);
-      this.poolId = poolId;
+    public PathCacheEntriesIterator(long prevKey, String pool) {
+      super(prevKey);
+      this.pool = pool;
     }

     @Override
     public BatchedEntries<PathCacheEntry> makeRequest(
-        Long prevEntryId, int maxRepliesPerRequest) throws IOException {
+        Long nextKey) throws IOException {
       ListPathCacheEntriesResponseProto response;
       try {
         ListPathCacheEntriesRequestProto req =
             ListPathCacheEntriesRequestProto.newBuilder().
-                setPrevEntry(
-                    PBHelper.convert(new PathCacheEntry(prevEntryId, null))).
-                setPool(PBHelper.convert(new CachePool(poolId))).
-                setMaxReplies(maxRepliesPerRequest).
+                setPrevId(nextKey).
+                setPool(pool).
                 build();
         response = rpcProxy.listPathCacheEntries(null, req);
-        if (response.getEntriesCount() == 0) {
+        if (response.getElementsCount() == 0) {
           response = null;
         }
       } catch (ServiceException e) {
@@ -1170,30 +1176,51 @@ public class ClientNamenodeProtocolTranslatorPB implements
   @Override
   public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
-      long poolId, int repliesPerRequest) throws IOException {
-    return new PathCacheEntriesIterator(prevId, repliesPerRequest, poolId);
+      String pool) throws IOException {
+    return new PathCacheEntriesIterator(prevId, pool);
   }

   @Override
-  public CachePool addCachePool(CachePoolInfo info) throws IOException {
+  public void addCachePool(CachePoolInfo info) throws IOException {
     AddCachePoolRequestProto.Builder builder =
         AddCachePoolRequestProto.newBuilder();
-    builder.setInfo(PBHelper.convert(info));
+    builder.setPoolName(info.getPoolName());
+    if (info.getOwnerName() != null) {
+      builder.setOwnerName(info.getOwnerName());
+    }
+    if (info.getGroupName() != null) {
+      builder.setGroupName(info.getGroupName());
+    }
+    if (info.getMode() != null) {
+      builder.setMode(info.getMode().toShort());
+    }
+    if (info.getWeight() != null) {
+      builder.setWeight(info.getWeight());
+    }
     try {
-      return PBHelper.convert(
-          rpcProxy.addCachePool(null, builder.build()).getPool());
+      rpcProxy.addCachePool(null, builder.build());
     } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
   }

   @Override
-  public void modifyCachePool(long poolId, CachePoolInfo info)
-      throws IOException {
-    ModifyCachePoolRequestProto.Builder builder =
-        ModifyCachePoolRequestProto.newBuilder()
-        .setPool(PBHelper.convert(new CachePool(poolId)))
-        .setInfo(PBHelper.convert(info));
+  public void modifyCachePool(CachePoolInfo req) throws IOException {
+    ModifyCachePoolRequestProto.Builder builder =
+        ModifyCachePoolRequestProto.newBuilder();
+    builder.setPoolName(req.getPoolName());
+    if (req.getOwnerName() != null) {
+      builder.setOwnerName(req.getOwnerName());
+    }
+    if (req.getGroupName() != null) {
+      builder.setGroupName(req.getGroupName());
+    }
+    if (req.getMode() != null) {
+      builder.setMode(req.getMode().toShort());
+    }
+    if (req.getWeight() != null) {
+      builder.setWeight(req.getWeight());
+    }
     try {
       rpcProxy.modifyCachePool(null, builder.build());
     } catch (ServiceException e) {
@@ -1202,69 +1229,74 @@ public class ClientNamenodeProtocolTranslatorPB implements
   }

   @Override
-  public void removeCachePool(long poolId) throws IOException {
+  public void removeCachePool(String cachePoolName) throws IOException {
     try {
       rpcProxy.removeCachePool(null,
           RemoveCachePoolRequestProto.newBuilder().
-          setPool(PBHelper.convert(new CachePool(poolId))).
-          build());
+          setPoolName(cachePoolName).build());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
   }

   private static class BatchedPathDirectiveEntries
-      implements BatchedEntries<CachePool> {
+      implements BatchedEntries<CachePoolInfo> {
     private final ListCachePoolsResponseProto proto;

     public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) {
       this.proto = proto;
     }

     @Override
-    public CachePool get(int i) {
+    public CachePoolInfo get(int i) {
       ListCachePoolsResponseElementProto elem = proto.getElements(i);
-      return PBHelper.convert(elem.getPool());
+      return new CachePoolInfo(elem.getPoolName()).
          setOwnerName(elem.getOwnerName()).
          setGroupName(elem.getGroupName()).
          setMode(new FsPermission((short)elem.getMode())).
          setWeight(elem.getWeight());
     }

     @Override
     public int size() {
       return proto.getElementsCount();
     }
+
+    @Override
+    public boolean hasMore() {
+      return proto.getHasMore();
+    }
   }

   private class CachePoolIterator
-      extends BatchedRemoteIterator<Long, CachePool> {
+      extends BatchedRemoteIterator<String, CachePoolInfo> {

-    public CachePoolIterator(Long prevKey, int maxRepliesPerRequest) {
-      super(prevKey, maxRepliesPerRequest);
+    public CachePoolIterator(String prevKey) {
+      super(prevKey);
     }

     @Override
-    public BatchedEntries<CachePool> makeRequest(Long prevKey,
-        int maxRepliesPerRequest) throws IOException {
+    public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
+        throws IOException {
       try {
         return new BatchedPathDirectiveEntries(
             rpcProxy.listCachePools(null,
               ListCachePoolsRequestProto.newBuilder().
-                setPrevPool(PBHelper.convert(new CachePool(prevKey))).
-                setMaxReplies(maxRepliesPerRequest).
-                build()));
+                setPrevPoolName(prevKey).build()));
       } catch (ServiceException e) {
         throw ProtobufHelper.getRemoteException(e);
       }
     }

     @Override
-    public Long elementToPrevKey(CachePool element) {
-      return element.getId();
+    public String elementToPrevKey(CachePoolInfo element) {
+      return element.getPoolName();
     }
   }

   @Override
-  public RemoteIterator<CachePool> listCachePools(long prevPoolId,
-      int maxRepliesPerRequest) throws IOException {
-    return new CachePoolIterator(prevPoolId, maxRepliesPerRequest);
+  public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
+      throws IOException {
+    return new CachePoolIterator(prevKey);
   }
 }

View File

@@ -32,13 +32,10 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -53,15 +50,9 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
@@ -123,7 +114,6 @@
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
-import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
@@ -1503,74 +1493,6 @@ public class PBHelper {
     return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
   }

-  public static PathCacheDirective convert(
-      PathCacheDirectiveProto directiveProto) {
-    CachePool pool = convert(directiveProto.getPool());
-    return new PathCacheDirective(directiveProto.getPath(), pool.getId());
-  }
-
-  public static PathCacheDirectiveProto convert(PathCacheDirective directive) {
-    PathCacheDirectiveProto.Builder builder =
-        PathCacheDirectiveProto.newBuilder()
-        .setPath(directive.getPath())
-        .setPool(PBHelper.convert(new CachePool(directive.getPoolId())));
-    return builder.build();
-  }
-
-  public static PathCacheEntry convert(PathCacheEntryProto entryProto) {
-    long entryId = entryProto.getId();
-    PathCacheDirective directive = convert(entryProto.getDirective());
-    return new PathCacheEntry(entryId, directive);
-  }
-
-  public static PathCacheEntryProto convert(PathCacheEntry entry) {
-    PathCacheEntryProto.Builder builder = PathCacheEntryProto.newBuilder()
-        .setId(entry.getEntryId())
-        .setDirective(PBHelper.convert(entry.getDirective()));
-    return builder.build();
-  }
-
-  public static CachePoolInfo convert(CachePoolInfoProto infoProto) {
-    CachePoolInfo.Builder builder =
-        CachePoolInfo.newBuilder().setPoolName(infoProto.getPoolName());
-    if (infoProto.hasOwnerName()) {
-      builder.setOwnerName(infoProto.getOwnerName());
-    }
-    if (infoProto.hasGroupName()) {
-      builder.setGroupName(infoProto.getGroupName());
-    }
-    if (infoProto.hasMode()) {
-      builder.setMode(new FsPermission((short) infoProto.getMode()));
-    }
-    if (infoProto.hasWeight()) {
-      builder.setWeight(infoProto.getWeight());
-    }
-    return builder.build();
-  }
-
-  public static CachePoolInfoProto convert(CachePoolInfo info) {
-    CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder()
-        .setPoolName(info.getPoolName())
-        .setOwnerName(info.getOwnerName())
-        .setGroupName(info.getGroupName())
-        .setMode(info.getMode().toShort())
-        .setWeight(info.getWeight());
-    return builder.build();
-  }
-
-  public static CachePool convert(CachePoolProto poolProto) {
-    CachePoolInfo info = convert(poolProto.getInfo());
-    CachePool pool = new CachePool(poolProto.getId(), info);
-    return pool;
-  }
-
-  public static CachePoolProto convert(CachePool pool) {
-    CachePoolProto.Builder builder = CachePoolProto.newBuilder()
-        .setId(pool.getId())
-        .setInfo(convert(pool.getInfo()));
-    return builder.build();
-  }
-
   public static InputStream vintPrefixed(final InputStream input)
throws IOException { throws IOException {
final int firstByte = input.read(); final int firstByte = input.read();
View File
@ -17,28 +17,34 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Iterator; import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.SortedMap; import java.util.SortedMap;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.Map.Entry;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.Fallible;
/** /**
@ -65,62 +71,58 @@ final class CacheManager {
/** /**
* Cache pools, sorted by name. * Cache pools, sorted by name.
*/ */
private final TreeMap<String, CachePool> cachePoolsByName = private final TreeMap<String, CachePool> cachePools =
new TreeMap<String, CachePool>(); new TreeMap<String, CachePool>();
/**
* Cache pools, sorted by ID
*/
private final TreeMap<Long, CachePool> cachePoolsById =
new TreeMap<Long, CachePool>();
/** /**
* The entry ID to use for a new entry. * The entry ID to use for a new entry.
*/ */
private long nextEntryId; private long nextEntryId;
/** /**
* The pool ID to use for a new pool. * Maximum number of cache pools to list in one operation.
*/ */
private long nextPoolId; private final int maxListCachePoolsResponses;
/**
* Maximum number of cache pool directives to list in one operation.
*/
private final int maxListCacheDirectivesResponses;
CacheManager(FSDirectory dir, Configuration conf) { CacheManager(FSDirectory dir, Configuration conf) {
// TODO: support loading and storing of the CacheManager state // TODO: support loading and storing of the CacheManager state
clear(); clear();
maxListCachePoolsResponses = conf.getInt(
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
maxListCacheDirectivesResponses = conf.getInt(
DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
} }
synchronized void clear() { synchronized void clear() {
entriesById.clear(); entriesById.clear();
entriesByDirective.clear(); entriesByDirective.clear();
cachePoolsByName.clear(); cachePools.clear();
cachePoolsById.clear();
nextEntryId = 1; nextEntryId = 1;
nextPoolId = 1;
} }
synchronized long getNextEntryId() throws IOException { synchronized long getNextEntryId() throws IOException {
if (nextEntryId == Long.MAX_VALUE) { if (nextEntryId == Long.MAX_VALUE) {
throw new IOException("no more available entry IDs"); throw new IOException("no more available IDs");
} }
return nextEntryId++; return nextEntryId++;
} }
synchronized long getNextPoolId() throws IOException {
if (nextPoolId == Long.MAX_VALUE) {
throw new IOException("no more available pool IDs");
}
return nextPoolId++;
}
private synchronized Fallible<PathCacheEntry> addDirective( private synchronized Fallible<PathCacheEntry> addDirective(
FSPermissionChecker pc, PathCacheDirective directive) { PathCacheDirective directive, FSPermissionChecker pc) {
CachePool pool = cachePoolsById.get(directive.getPoolId()); CachePool pool = cachePools.get(directive.getPool());
if (pool == null) { if (pool == null) {
LOG.info("addDirective " + directive + ": pool not found."); LOG.info("addDirective " + directive + ": pool not found.");
return new Fallible<PathCacheEntry>( return new Fallible<PathCacheEntry>(
new InvalidPoolError(directive)); new InvalidPoolNameError(directive));
} }
if (!pc.checkPermission(pool, FsAction.WRITE)) { if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) {
LOG.info("addDirective " + directive + ": write permission denied."); LOG.info("addDirective " + directive + ": write permission denied.");
return new Fallible<PathCacheEntry>( return new Fallible<PathCacheEntry>(
new PoolWritePermissionDeniedError(directive)); new PoolWritePermissionDeniedError(directive));
@ -155,17 +157,17 @@ final class CacheManager {
} }
public synchronized List<Fallible<PathCacheEntry>> addDirectives( public synchronized List<Fallible<PathCacheEntry>> addDirectives(
FSPermissionChecker pc, List<PathCacheDirective> directives) { List<PathCacheDirective> directives, FSPermissionChecker pc) {
ArrayList<Fallible<PathCacheEntry>> results = ArrayList<Fallible<PathCacheEntry>> results =
new ArrayList<Fallible<PathCacheEntry>>(directives.size()); new ArrayList<Fallible<PathCacheEntry>>(directives.size());
for (PathCacheDirective directive: directives) { for (PathCacheDirective directive: directives) {
results.add(addDirective(pc, directive)); results.add(addDirective(directive, pc));
} }
return results; return results;
} }
private synchronized Fallible<Long> removeEntry(FSPermissionChecker pc, private synchronized Fallible<Long> removeEntry(long entryId,
long entryId) { FSPermissionChecker pc) {
// Check for invalid IDs. // Check for invalid IDs.
if (entryId <= 0) { if (entryId <= 0) {
LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID."); LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID.");
@ -177,20 +179,20 @@ final class CacheManager {
LOG.info("removeEntry " + entryId + ": entry not found."); LOG.info("removeEntry " + entryId + ": entry not found.");
return new Fallible<Long>(new NoSuchIdException(entryId)); return new Fallible<Long>(new NoSuchIdException(entryId));
} }
CachePool pool = cachePoolsById.get(existing.getDirective().getPoolId()); CachePool pool = cachePools.get(existing.getDirective().getPool());
if (pool == null) { if (pool == null) {
LOG.info("removeEntry " + entryId + ": pool not found for directive " + LOG.info("removeEntry " + entryId + ": pool not found for directive " +
existing.getDirective()); existing.getDirective());
return new Fallible<Long>( return new Fallible<Long>(
new UnexpectedRemovePathCacheEntryException(entryId)); new UnexpectedRemovePathCacheEntryException(entryId));
} }
if (!pc.checkPermission(pool, FsAction.WRITE)) { if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) {
LOG.info("removeEntry " + entryId + ": write permission denied to " + LOG.info("removeEntry " + entryId + ": write permission denied to " +
"pool " + pool + " for entry " + existing); "pool " + pool + " for entry " + existing);
return new Fallible<Long>( return new Fallible<Long>(
new RemovePermissionDeniedException(entryId)); new RemovePermissionDeniedException(entryId));
} }
// Remove the corresponding entry in entriesByDirective. // Remove the corresponding entry in entriesByDirective.
if (entriesByDirective.remove(existing.getDirective()) == null) { if (entriesByDirective.remove(existing.getDirective()) == null) {
LOG.warn("removeEntry " + entryId + ": failed to find existing entry " + LOG.warn("removeEntry " + entryId + ": failed to find existing entry " +
@ -202,41 +204,43 @@ final class CacheManager {
return new Fallible<Long>(entryId); return new Fallible<Long>(entryId);
} }
public synchronized List<Fallible<Long>> removeEntries(FSPermissionChecker pc, public synchronized List<Fallible<Long>> removeEntries(List<Long> entryIds,
List<Long> entryIds) { FSPermissionChecker pc) {
ArrayList<Fallible<Long>> results = ArrayList<Fallible<Long>> results =
new ArrayList<Fallible<Long>>(entryIds.size()); new ArrayList<Fallible<Long>>(entryIds.size());
for (Long entryId : entryIds) { for (Long entryId : entryIds) {
results.add(removeEntry(pc, entryId)); results.add(removeEntry(entryId, pc));
} }
return results; return results;
} }
public synchronized List<PathCacheEntry> listPathCacheEntries( public synchronized BatchedListEntries<PathCacheEntry>
FSPermissionChecker pc, long prevId, Long poolId, int maxReplies) { listPathCacheEntries(long prevId, String filterPool, FSPermissionChecker pc) {
final int MAX_PRE_ALLOCATED_ENTRIES = 16; final int NUM_PRE_ALLOCATED_ENTRIES = 16;
ArrayList<PathCacheEntry> replies = new ArrayList<PathCacheEntry>( ArrayList<PathCacheEntry> replies =
Math.min(MAX_PRE_ALLOCATED_ENTRIES, maxReplies)); new ArrayList<PathCacheEntry>(NUM_PRE_ALLOCATED_ENTRIES);
int numReplies = 0; int numReplies = 0;
SortedMap<Long, PathCacheEntry> tailMap = entriesById.tailMap(prevId + 1); SortedMap<Long, PathCacheEntry> tailMap = entriesById.tailMap(prevId + 1);
for (PathCacheEntry entry : tailMap.values()) { for (Entry<Long, PathCacheEntry> cur : tailMap.entrySet()) {
if (numReplies >= maxReplies) { if (numReplies >= maxListCacheDirectivesResponses) {
return replies; return new BatchedListEntries<PathCacheEntry>(replies, true);
} }
long entryPoolId = entry.getDirective().getPoolId(); PathCacheEntry curEntry = cur.getValue();
if (poolId == null || poolId <= 0 || entryPoolId == poolId) { if (!filterPool.isEmpty() &&
if (pc.checkPermission( !cur.getValue().getDirective().getPool().equals(filterPool)) {
cachePoolsById.get(entryPoolId), FsAction.EXECUTE)) { continue;
replies.add(entry); }
numReplies++; CachePool pool = cachePools.get(curEntry.getDirective().getPool());
} if (pool == null) {
LOG.error("invalid pool for PathCacheEntry " + curEntry);
continue;
}
if (pc.checkPermission(pool, FsAction.EXECUTE)) {
replies.add(cur.getValue());
numReplies++;
} }
} }
return replies; return new BatchedListEntries<PathCacheEntry>(replies, false);
}
synchronized CachePool getCachePool(long id) {
return cachePoolsById.get(id);
} }
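The filter rule used in the listing loop above, restated as a standalone hypothetical helper (an empty pool name disables filtering):

public class PoolFilterSketch {
  // An empty filter means "list directives from every pool"; otherwise only
  // directives whose pool name matches the filter are returned.
  static boolean matchesFilter(String filterPool, String directivePool) {
    return filterPool.isEmpty() || directivePool.equals(filterPool);
  }
}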
/** /**
@ -246,24 +250,22 @@ final class CacheManager {
* *
* @param info * @param info
* The info for the cache pool to create. * The info for the cache pool to create.
* @return created CachePool
*/ */
public synchronized CachePool addCachePool(CachePoolInfo info) public synchronized void addCachePool(CachePoolInfo info)
throws IOException { throws IOException {
String poolName = info.getPoolName(); String poolName = info.getPoolName();
if (poolName == null || poolName.isEmpty()) { if (poolName.isEmpty()) {
throw new IOException("invalid empty cache pool name"); throw new IOException("invalid empty cache pool name");
} }
if (cachePoolsByName.containsKey(poolName)) { CachePool pool = cachePools.get(poolName);
if (pool != null) {
throw new IOException("cache pool " + poolName + " already exists."); throw new IOException("cache pool " + poolName + " already exists.");
} }
CachePool cachePool = new CachePool(getNextPoolId(), poolName, CachePool cachePool = new CachePool(poolName,
info.getOwnerName(), info.getGroupName(), info.getMode(), info.getOwnerName(), info.getGroupName(), info.getMode(),
info.getWeight()); info.getWeight());
cachePoolsById.put(cachePool.getId(), cachePool); cachePools.put(poolName, cachePool);
cachePoolsByName.put(poolName, cachePool);
LOG.info("created new cache pool " + cachePool); LOG.info("created new cache pool " + cachePool);
return cachePool;
} }
/** /**
@ -274,62 +276,46 @@ final class CacheManager {
* @param info * @param info
* The info for the cache pool to modify. * The info for the cache pool to modify.
*/ */
public synchronized void modifyCachePool(long poolId, CachePoolInfo info) public synchronized void modifyCachePool(CachePoolInfo info)
throws IOException { throws IOException {
if (poolId <= 0) { String poolName = info.getPoolName();
throw new IOException("invalid pool id " + poolId); if (poolName.isEmpty()) {
throw new IOException("invalid empty cache pool name");
} }
if (!cachePoolsById.containsKey(poolId)) { CachePool pool = cachePools.get(poolName);
throw new IOException("cache pool id " + poolId + " does not exist."); if (pool == null) {
throw new IOException("cache pool " + poolName + " does not exist.");
} }
CachePool pool = cachePoolsById.get(poolId);
// Remove the old CachePoolInfo
removeCachePool(poolId);
// Build up the new CachePoolInfo
CachePoolInfo.Builder newInfo = CachePoolInfo.newBuilder(pool.getInfo());
StringBuilder bld = new StringBuilder(); StringBuilder bld = new StringBuilder();
String prefix = ""; String prefix = "";
if (info.getPoolName() != null) {
newInfo.setPoolName(info.getPoolName());
bld.append(prefix).
append("set name to ").append(info.getOwnerName());
prefix = "; ";
}
if (info.getOwnerName() != null) { if (info.getOwnerName() != null) {
newInfo.setOwnerName(info.getOwnerName()); pool.setOwnerName(info.getOwnerName());
bld.append(prefix). bld.append(prefix).
append("set owner to ").append(info.getOwnerName()); append("set owner to ").append(info.getOwnerName());
prefix = "; "; prefix = "; ";
} }
if (info.getGroupName() != null) { if (info.getGroupName() != null) {
newInfo.setGroupName(info.getGroupName()); pool.setGroupName(info.getGroupName());
bld.append(prefix). bld.append(prefix).
append("set group to ").append(info.getGroupName()); append("set group to ").append(info.getGroupName());
prefix = "; "; prefix = "; ";
} }
if (info.getMode() != null) { if (info.getMode() != null) {
newInfo.setMode(info.getMode()); pool.setMode(info.getMode());
bld.append(prefix). bld.append(prefix).
append(String.format("set mode to ", info.getMode())); append(String.format("set mode to 0%3o", info.getMode()));
prefix = "; "; prefix = "; ";
} }
if (info.getWeight() != null) { if (info.getWeight() != null) {
newInfo.setWeight(info.getWeight()); pool.setWeight(info.getWeight());
bld.append(prefix). bld.append(prefix).
append("set weight to ").append(info.getWeight()); append("set weight to ").append(info.getWeight());
prefix = "; "; prefix = "; ";
} }
if (prefix.isEmpty()) { if (prefix.isEmpty()) {
bld.append("no changes."); bld.append("no changes.");
} else {
pool.setInfo(newInfo.build());
} }
// Put the newly modified info back in LOG.info("modified " + poolName + "; " + bld.toString());
cachePoolsById.put(poolId, pool);
cachePoolsByName.put(info.getPoolName(), pool);
LOG.info("modified pool id " + pool.getId()
+ " (" + pool.getInfo().getPoolName() + "); "
+ bld.toString());
} }
/** /**
@ -337,39 +323,47 @@ final class CacheManager {
* *
* Only the superuser should be able to call this function. * Only the superuser should be able to call this function.
* *
* @param poolId * @param poolName
* The id of the cache pool to remove. * The name for the cache pool to remove.
*/ */
public synchronized void removeCachePool(long poolId) throws IOException { public synchronized void removeCachePool(String poolName)
if (!cachePoolsById.containsKey(poolId)) { throws IOException {
throw new IOException("can't remove nonexistent cache pool id " + poolId); CachePool pool = cachePools.remove(poolName);
if (pool == null) {
throw new IOException("can't remove nonexistent cache pool " + poolName);
} }
// Remove all the entries associated with the pool
Iterator<Map.Entry<Long, PathCacheEntry>> it = // Remove entries using this pool
entriesById.entrySet().iterator(); // TODO: could optimize this somewhat to avoid the need to iterate
while (it.hasNext()) { // over all entries in entriesByDirective
Map.Entry<Long, PathCacheEntry> entry = it.next(); Iterator<Entry<PathCacheDirective, PathCacheEntry>> iter =
if (entry.getValue().getDirective().getPoolId() == poolId) { entriesByDirective.entrySet().iterator();
it.remove(); while (iter.hasNext()) {
entriesByDirective.remove(entry.getValue().getDirective()); Entry<PathCacheDirective, PathCacheEntry> entry = iter.next();
if (entry.getKey().getPool().equals(poolName)) {
entriesById.remove(entry.getValue().getEntryId());
iter.remove();
} }
} }
// Remove the pool
CachePool pool = cachePoolsById.remove(poolId);
cachePoolsByName.remove(pool.getInfo().getPoolName());
} }
public synchronized List<CachePool> listCachePools(Long prevKey, public synchronized BatchedListEntries<CachePoolInfo>
int maxRepliesPerRequest) { listCachePools(FSPermissionChecker pc, String prevKey) {
final int MAX_PREALLOCATED_REPLIES = 16; final int NUM_PRE_ALLOCATED_ENTRIES = 16;
ArrayList<CachePool> results = ArrayList<CachePoolInfo> results =
new ArrayList<CachePool>(Math.min(MAX_PREALLOCATED_REPLIES, new ArrayList<CachePoolInfo>(NUM_PRE_ALLOCATED_ENTRIES);
maxRepliesPerRequest)); SortedMap<String, CachePool> tailMap = cachePools.tailMap(prevKey, false);
SortedMap<Long, CachePool> tailMap = int numListed = 0;
cachePoolsById.tailMap(prevKey, false); for (Entry<String, CachePool> cur : tailMap.entrySet()) {
for (CachePool pool : tailMap.values()) { if (numListed++ >= maxListCachePoolsResponses) {
results.add(pool); return new BatchedListEntries<CachePoolInfo>(results, true);
}
if (pc == null) {
results.add(cur.getValue().getInfo(true));
} else {
results.add(cur.getValue().getInfo(pc));
}
} }
return results; return new BatchedListEntries<CachePoolInfo>(results, false);
} }
} }
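A sketch of how a caller builds the CachePoolInfo consumed by addCachePool and modifyCachePool, using the fluent setters exercised by the tests in this change; the pool name and values are made up, and any field left null is defaulted by the NameNode (owner and group from the caller's UGI, mode from FsPermission.getCachePoolDefault(), weight 100), per the CachePool constructor that follows:

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class PoolInfoSketch {
  // Every setter is optional; skipped fields are defaulted on the NameNode.
  static CachePoolInfo newPoolInfo() {
    return new CachePoolInfo("pool1")
        .setOwnerName("bob")
        .setGroupName("bobgroup")
        .setMode(new FsPermission((short) 0755))
        .setWeight(150);
  }
}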
View File
@ -19,119 +19,137 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException; import java.io.IOException;
import org.apache.commons.lang.builder.EqualsBuilder; import javax.annotation.Nonnull;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo.Builder; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
/** /**
* A CachePool describes a set of cache resources being managed by the NameNode. * A CachePool describes a set of cache resources being managed by the NameNode.
* User caching requests are billed to the cache pool specified in the request. * User caching requests are billed to the cache pool specified in the request.
* *
* CachePools are uniquely identified by a numeric id as well as the * This is an internal class, only used on the NameNode. For identifying or
* {@link CachePoolInfo} pool name. Mutable metadata is contained in * describing a cache pool to clients, please use CachePoolInfo.
* CachePoolInfo, including pool name, owner, group, and permissions.
* See this class for more details.
*/ */
@InterfaceAudience.Private
public final class CachePool { public final class CachePool {
public static final Log LOG = LogFactory.getLog(CachePool.class); public static final Log LOG = LogFactory.getLog(CachePool.class);
private final long id; @Nonnull
private final String poolName;
private CachePoolInfo info; @Nonnull
private String ownerName;
public CachePool(long id) { @Nonnull
this.id = id; private String groupName;
this.info = null;
} @Nonnull
private FsPermission mode;
CachePool(long id, String poolName, String ownerName, String groupName,
private int weight;
public CachePool(String poolName, String ownerName, String groupName,
FsPermission mode, Integer weight) throws IOException { FsPermission mode, Integer weight) throws IOException {
this.id = id; this.poolName = poolName;
// Set CachePoolInfo default fields if null
if (poolName == null || poolName.isEmpty()) {
throw new IOException("invalid empty cache pool name");
}
UserGroupInformation ugi = null; UserGroupInformation ugi = null;
if (ownerName == null) { if (ownerName == null) {
ugi = NameNode.getRemoteUser(); if (ugi == null) {
ownerName = ugi.getShortUserName(); ugi = NameNode.getRemoteUser();
}
this.ownerName = ugi.getShortUserName();
} else {
this.ownerName = ownerName;
} }
if (groupName == null) { if (groupName == null) {
if (ugi == null) { if (ugi == null) {
ugi = NameNode.getRemoteUser(); ugi = NameNode.getRemoteUser();
} }
String[] groups = ugi.getGroupNames(); this.groupName = ugi.getPrimaryGroupName();
if (groups.length == 0) { } else {
throw new IOException("failed to get group names from UGI " + ugi); this.groupName = groupName;
}
groupName = groups[0];
} }
if (mode == null) { this.mode = mode != null ?
mode = FsPermission.getDirDefault(); new FsPermission(mode): FsPermission.getCachePoolDefault();
} this.weight = weight != null ? weight : 100;
if (weight == null) {
weight = 100;
}
CachePoolInfo.Builder builder = CachePoolInfo.newBuilder();
builder.setPoolName(poolName).setOwnerName(ownerName)
.setGroupName(groupName).setMode(mode).setWeight(weight);
this.info = builder.build();
} }
public CachePool(long id, CachePoolInfo info) { public String getName() {
this.id = id; return poolName;
this.info = info;
} }
/** public String getOwnerName() {
* @return id of the pool return ownerName;
*/
public long getId() {
return id;
} }
public CachePool setOwnerName(String ownerName) {
this.ownerName = ownerName;
return this;
}
public String getGroupName() {
return groupName;
}
public CachePool setGroupName(String groupName) {
this.groupName = groupName;
return this;
}
public FsPermission getMode() {
return mode;
}
public CachePool setMode(FsPermission mode) {
this.mode = new FsPermission(mode);
return this;
}
public int getWeight() {
return weight;
}
public CachePool setWeight(int weight) {
this.weight = weight;
return this;
}
/** /**
* Get information about this cache pool. * Get information about this cache pool.
* *
* @param fullInfo
* If false, only the name will be returned (i.e., what you
* would get if you didn't have read permission for this pool.)
* @return * @return
* Cache pool information. * Cache pool information.
*/ */
public CachePoolInfo getInfo() { public CachePoolInfo getInfo(boolean fullInfo) {
return info; CachePoolInfo info = new CachePoolInfo(poolName);
if (!fullInfo) {
return info;
}
return info.setOwnerName(ownerName).
setGroupName(groupName).
setMode(new FsPermission(mode)).
setWeight(weight);
} }
void setInfo(CachePoolInfo info) { public CachePoolInfo getInfo(FSPermissionChecker pc) {
this.info = info; return getInfo(pc.checkPermission(this, FsAction.READ));
} }
public String toString() { public String toString() {
return new StringBuilder(). return new StringBuilder().
append("{ ").append("id:").append(id). append("{ ").append("poolName:").append(poolName).
append(", info:").append(info.toString()). append(", ownerName:").append(ownerName).
append(", groupName:").append(groupName).
append(", mode:").append(mode).
append(", weight:").append(weight).
append(" }").toString(); append(" }").toString();
} }
@Override
public int hashCode() {
return new HashCodeBuilder().append(id).append(info).hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == null) { return false; }
if (obj == this) { return true; }
if (obj.getClass() != getClass()) {
return false;
}
CachePool rhs = (CachePool)obj;
return new EqualsBuilder()
.append(id, rhs.id)
.append(info, rhs.info)
.isEquals();
}
} }
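A hypothetical NameNode-side sketch of the class above: building a pool with every field supplied (so the constructor needs no UGI lookup) and asking for either the restricted or the full view via getInfo(boolean):

import java.io.IOException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.server.namenode.CachePool;

public class CachePoolViewSketch {
  static CachePool newPool() throws IOException {
    // All fields supplied, so the constructor performs no UGI lookups.
    return new CachePool("pool1", "bob", "bobgroup",
        new FsPermission((short) 0755), 150);
  }

  static CachePoolInfo describe(CachePool pool, boolean mayRead) {
    // false -> only the pool name; true -> owner, group, mode and weight too.
    return pool.getInfo(mayRead);
  }
}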
View File
@ -119,6 +119,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException; import org.apache.hadoop.fs.FileAlreadyExistsException;
@ -6756,9 +6757,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (retryCacheEntry != null && retryCacheEntry.isSuccess()) { if (retryCacheEntry != null && retryCacheEntry.isSuccess()) {
return (List<Fallible<PathCacheEntry>>) retryCacheEntry.getPayload(); return (List<Fallible<PathCacheEntry>>) retryCacheEntry.getPayload();
} }
final FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
boolean success = false; boolean success = false;
List<Fallible<PathCacheEntry>> results = null; List<Fallible<PathCacheEntry>> results = null;
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
@ -6766,7 +6769,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
throw new SafeModeException( throw new SafeModeException(
"Cannot add path cache directive", safeMode); "Cannot add path cache directive", safeMode);
} }
results = cacheManager.addDirectives(pc, directives); results = cacheManager.addDirectives(directives, pc);
//getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119 //getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119
success = true; success = true;
} finally { } finally {
@ -6774,7 +6777,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (success) { if (success) {
getEditLog().logSync(); getEditLog().logSync();
} }
if (auditLog.isInfoEnabled() && isExternalInvocation()) { if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addPathCacheDirectives", null, null, null); logAuditEvent(success, "addPathCacheDirectives", null, null, null);
} }
RetryCache.setState(retryCacheEntry, success, results); RetryCache.setState(retryCacheEntry, success, results);
@ -6783,147 +6786,175 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
@SuppressWarnings("unchecked") @SuppressWarnings("unchecked")
List<Fallible<Long>> removePathCacheEntries(List<Long> ids) List<Fallible<Long>> removePathCacheEntries(List<Long> ids) throws IOException {
throws IOException { CacheEntryWithPayload retryCacheEntry =
final FSPermissionChecker pc = getPermissionChecker(); RetryCache.waitForCompletion(retryCache, null);
if (retryCacheEntry != null && retryCacheEntry.isSuccess()) {
return (List<Fallible<Long>>) retryCacheEntry.getPayload();
}
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
boolean success = false; boolean success = false;
List<Fallible<Long>> results = null; List<Fallible<Long>> results = null;
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { if (isInSafeMode()) {
throw new SafeModeException( throw new SafeModeException(
"Cannot add path cache directive", safeMode); "Cannot remove path cache directives", safeMode);
} }
results = cacheManager.removeEntries(pc, ids); results = cacheManager.removeEntries(ids, pc);
//getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119 //getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119
success = true; success = true;
} finally { } finally {
writeUnlock(); writeUnlock();
if (success) { if (isAuditEnabled() && isExternalInvocation()) {
getEditLog().logSync();
}
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(success, "removePathCacheEntries", null, null, null); logAuditEvent(success, "removePathCacheEntries", null, null, null);
} }
RetryCache.setState(retryCacheEntry, success, results);
}
getEditLog().logSync();
return results;
}
BatchedListEntries<PathCacheEntry> listPathCacheEntries(long startId,
String pool) throws IOException {
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
BatchedListEntries<PathCacheEntry> results;
checkOperation(OperationCategory.READ);
readLock();
boolean success = false;
try {
checkOperation(OperationCategory.READ);
results = cacheManager.listPathCacheEntries(startId, pool, pc);
success = true;
} finally {
readUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "listPathCacheEntries", null, null, null);
}
} }
return results; return results;
} }
List<PathCacheEntry> listPathCacheEntries(long startId, public void addCachePool(CachePoolInfo req) throws IOException {
Long poolId, int maxReplies) throws IOException { final FSPermissionChecker pc = isPermissionEnabled ?
LOG.info("listPathCacheEntries with " + startId + " " + poolId); getPermissionChecker() : null;
final FSPermissionChecker pc = getPermissionChecker();
return cacheManager.listPathCacheEntries(pc, startId, poolId, maxReplies);
}
public CachePool addCachePool(CachePoolInfo req) throws IOException {
final FSPermissionChecker pc = getPermissionChecker();
CacheEntryWithPayload cacheEntry =
RetryCache.waitForCompletion(retryCache, null);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return (CachePool)cacheEntry.getPayload(); // Return previous response
}
writeLock();
CachePool pool = null;
try {
checkOperation(OperationCategory.WRITE);
if (!pc.isSuperUser()) {
throw new AccessControlException("Non-super users cannot " +
"add cache pools.");
}
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot add cache pool " + req.getPoolName(), safeMode);
}
pool = cacheManager.addCachePool(req);
RetryCache.setState(cacheEntry, true);
//getEditLog().logAddCachePool(req); // FIXME: HDFS-5119
} finally {
writeUnlock();
}
getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "addCachePool", req.getPoolName(), null, null);
}
return pool;
}
public void modifyCachePool(long poolId, CachePoolInfo info)
throws IOException {
final FSPermissionChecker pc = getPermissionChecker();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response return; // Return previous response
} }
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
boolean success = false;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (!pc.isSuperUser()) {
throw new AccessControlException("Non-super users cannot " +
"modify cache pools.");
}
if (isInSafeMode()) { if (isInSafeMode()) {
throw new SafeModeException( throw new SafeModeException(
"Cannot modify cache pool " + info.getPoolName(), safeMode); "Cannot add cache pool " + req.getPoolName(), safeMode);
} }
cacheManager.modifyCachePool(poolId, info); if (pc != null) {
RetryCache.setState(cacheEntry, true); pc.checkSuperuserPrivilege();
}
cacheManager.addCachePool(req);
//getEditLog().logAddCachePool(req); // FIXME: HDFS-5119
success = true;
} finally {
writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addCachePool", req.getPoolName(), null, null);
}
RetryCache.setState(cacheEntry, success);
}
getEditLog().logSync();
}
public void modifyCachePool(CachePoolInfo req) throws IOException {
final FSPermissionChecker pc =
isPermissionEnabled ? getPermissionChecker() : null;
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
checkOperation(OperationCategory.WRITE);
writeLock();
boolean success = false;
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot modify cache pool " + req.getPoolName(), safeMode);
}
if (pc != null) {
pc.checkSuperuserPrivilege();
}
cacheManager.modifyCachePool(req);
//getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119 //getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119
success = true;
} finally { } finally {
writeUnlock(); writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null);
}
RetryCache.setState(cacheEntry, success);
} }
getEditLog().logSync(); getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "modifyCachePool", info.getPoolName(), null, null);
}
} }
public void removeCachePool(long poolId) throws IOException { public void removeCachePool(String cachePoolName) throws IOException {
final FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc =
isPermissionEnabled ? getPermissionChecker() : null;
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
CachePool pool; boolean success = false;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (!pc.isSuperUser()) {
throw new AccessControlException("Non-super users cannot " +
"remove cache pools.");
}
pool = cacheManager.getCachePool(poolId);
if (isInSafeMode()) { if (isInSafeMode()) {
String identifier;
if (pool == null) {
identifier = "with id " + Long.toString(poolId);
} else {
identifier = pool.getInfo().getPoolName();
}
throw new SafeModeException( throw new SafeModeException(
"Cannot remove cache pool " + identifier, safeMode); "Cannot remove cache pool " + cachePoolName, safeMode);
} }
cacheManager.removeCachePool(poolId); if (pc != null) {
pc.checkSuperuserPrivilege();
}
cacheManager.removeCachePool(cachePoolName);
//getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119 //getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119
success = true;
} finally { } finally {
writeUnlock(); writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "removeCachePool", cachePoolName, null, null);
}
RetryCache.setState(cacheEntry, success);
} }
getEditLog().logSync(); getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "removeCachePool", pool.getInfo().getPoolName(),
null, null);
}
} }
public List<CachePool> listCachePools(long prevKey, public BatchedListEntries<CachePoolInfo> listCachePools(String prevKey)
int maxRepliesPerRequest) throws IOException { throws IOException {
List<CachePool> results; final FSPermissionChecker pc =
isPermissionEnabled ? getPermissionChecker() : null;
BatchedListEntries<CachePoolInfo> results;
checkOperation(OperationCategory.READ);
boolean success = false;
readLock(); readLock();
try { try {
checkOperation(OperationCategory.READ); checkOperation(OperationCategory.READ);
results = cacheManager.listCachePools(prevKey, maxRepliesPerRequest); results = cacheManager.listCachePools(pc, prevKey);
success = true;
} finally { } finally {
readUnlock(); readUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "listCachePools", null, null, null);
}
} }
return results; return results;
} }
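An illustrative skeleton, not FSNamesystem itself, of the read-path shape the new methods above follow: take the read lock, do the work, and audit-log the success flag from the finally block:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class ReadPathSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  // Stand-in for a cacheManager call guarded by the namesystem read lock.
  String listSomething() {
    boolean success = false;
    String results = null;
    lock.readLock().lock();
    try {
      results = "results";
      success = true;
    } finally {
      lock.readLock().unlock();
      audit(success, "listSomething");
    }
    return results;
  }

  private void audit(boolean success, String cmd) {
    System.out.println(cmd + " success=" + success);
  }
}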
View File
@ -17,6 +17,7 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
@ -28,7 +29,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
@ -264,16 +264,15 @@ class FSPermissionChecker {
* @return if the pool can be accessed * @return if the pool can be accessed
*/ */
public boolean checkPermission(CachePool pool, FsAction access) { public boolean checkPermission(CachePool pool, FsAction access) {
CachePoolInfo info = pool.getInfo(); FsPermission mode = pool.getMode();
FsPermission mode = info.getMode();
if (isSuperUser()) { if (isSuperUser()) {
return true; return true;
} }
if (user.equals(info.getOwnerName()) if (user.equals(pool.getOwnerName())
&& mode.getUserAction().implies(access)) { && mode.getUserAction().implies(access)) {
return true; return true;
} }
if (groups.contains(info.getGroupName()) if (groups.contains(pool.getGroupName())
&& mode.getGroupAction().implies(access)) { && mode.getGroupAction().implies(access)) {
return true; return true;
} }
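A standalone sketch of the evaluation order above: superuser first, then the owner's user bits, then the group bits; the final fallback to the pool's other bits is assumed here, since that part of the method lies outside this hunk:

import java.util.List;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class PoolAccessSketch {
  // Superuser short-circuits; otherwise fall through the user bits,
  // the group bits, and finally the "other" bits.
  static boolean canAccess(boolean superUser, String user, List<String> groups,
      String poolOwner, String poolGroup, FsPermission mode, FsAction access) {
    if (superUser) {
      return true;
    }
    if (user.equals(poolOwner) && mode.getUserAction().implies(access)) {
      return true;
    }
    if (groups.contains(poolGroup) && mode.getGroupAction().implies(access)) {
      return true;
    }
    return mode.getOtherAction().implies(access);
  }
}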
View File
@ -31,11 +31,13 @@ import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.NoSuchElementException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator; import org.apache.hadoop.fs.BatchedRemoteIterator;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
@ -60,9 +62,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -1223,20 +1225,17 @@ class NameNodeRpcServer implements NamenodeProtocols {
private class ServerSidePathCacheEntriesIterator private class ServerSidePathCacheEntriesIterator
extends BatchedRemoteIterator<Long, PathCacheEntry> { extends BatchedRemoteIterator<Long, PathCacheEntry> {
private final Long poolId; private final String pool;
public ServerSidePathCacheEntriesIterator(Long firstKey, public ServerSidePathCacheEntriesIterator(Long firstKey, String pool) {
int maxRepliesPerRequest, Long poolId) { super(firstKey);
super(firstKey, maxRepliesPerRequest); this.pool = pool;
this.poolId = poolId;
} }
@Override @Override
public BatchedEntries<PathCacheEntry> makeRequest( public BatchedEntries<PathCacheEntry> makeRequest(
Long prevKey, int maxRepliesPerRequest) throws IOException { Long nextKey) throws IOException {
return new BatchedListEntries<PathCacheEntry>( return namesystem.listPathCacheEntries(nextKey, pool);
namesystem.listPathCacheEntries(prevKey, poolId,
maxRepliesPerRequest));
} }
@Override @Override
@ -1244,52 +1243,50 @@ class NameNodeRpcServer implements NamenodeProtocols {
return entry.getEntryId(); return entry.getEntryId();
} }
} }
@Override @Override
public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId, public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
long poolId, int maxReplies) throws IOException { String pool) throws IOException {
return new ServerSidePathCacheEntriesIterator(prevId, maxReplies, poolId); return new ServerSidePathCacheEntriesIterator(prevId, pool);
} }
@Override @Override
public CachePool addCachePool(CachePoolInfo info) throws IOException { public void addCachePool(CachePoolInfo info) throws IOException {
return namesystem.addCachePool(info); namesystem.addCachePool(info);
} }
@Override @Override
public void modifyCachePool(long poolId, CachePoolInfo info) public void modifyCachePool(CachePoolInfo info) throws IOException {
throws IOException { namesystem.modifyCachePool(info);
namesystem.modifyCachePool(poolId, info);
} }
@Override @Override
public void removeCachePool(long poolId) throws IOException { public void removeCachePool(String cachePoolName) throws IOException {
namesystem.removeCachePool(poolId); namesystem.removeCachePool(cachePoolName);
} }
private class ServerSideCachePoolIterator private class ServerSideCachePoolIterator
extends BatchedRemoteIterator<Long, CachePool> { extends BatchedRemoteIterator<String, CachePoolInfo> {
public ServerSideCachePoolIterator(long prevId, int maxRepliesPerRequest) { public ServerSideCachePoolIterator(String prevKey) {
super(prevId, maxRepliesPerRequest); super(prevKey);
} }
@Override @Override
public BatchedEntries<CachePool> makeRequest(Long prevId, public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
int maxRepliesPerRequest) throws IOException { throws IOException {
return new BatchedListEntries<CachePool>( return namesystem.listCachePools(prevKey);
namesystem.listCachePools(prevId, maxRepliesPerRequest));
} }
@Override @Override
public Long elementToPrevKey(CachePool element) { public String elementToPrevKey(CachePoolInfo element) {
return element.getId(); return element.getPoolName();
} }
} }
@Override @Override
public RemoteIterator<CachePool> listCachePools(long prevPoolId, public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
int maxRepliesPerRequest) throws IOException { throws IOException {
return new ServerSideCachePoolIterator(prevPoolId, maxRepliesPerRequest); return new ServerSideCachePoolIterator(prevKey);
} }
} }
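A toy subclass showing the contract the two server-side iterators implement, assuming makeRequest and elementToPrevKey are the only abstract methods of BatchedRemoteIterator (as the overrides above suggest): fetch a batch strictly after prevKey, report hasMore, and tell the base class how to derive the next key from the last element:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;
import org.apache.hadoop.fs.BatchedRemoteIterator;

public class InMemoryPoolIterator extends BatchedRemoteIterator<String, String> {
  private final TreeMap<String, String> pools;
  private final int batchSize;

  public InMemoryPoolIterator(TreeMap<String, String> pools, int batchSize) {
    super("");                       // start before the first pool name
    this.pools = pools;
    this.batchSize = batchSize;
  }

  @Override
  public BatchedEntries<String> makeRequest(String prevKey) throws IOException {
    List<String> batch = new ArrayList<String>();
    boolean hasMore = false;
    SortedMap<String, String> tail = pools.tailMap(prevKey, false);
    for (String name : tail.keySet()) {
      if (batch.size() >= batchSize) {
        hasMore = true;              // more names remain past this batch
        break;
      }
      batch.add(name);
    }
    return new BatchedListEntries<String>(batch, hasMore);
  }

  @Override
  public String elementToPrevKey(String element) {
    return element;                  // the pool name itself is the resume key
  }
}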
View File
@ -363,27 +363,9 @@ message IsFileClosedResponseProto {
required bool result = 1; required bool result = 1;
} }
message CachePoolInfoProto {
optional string poolName = 1;
optional string ownerName = 2;
optional string groupName = 3;
optional int32 mode = 4;
optional int32 weight = 5;
}
message CachePoolProto {
optional int64 id = 1;
optional CachePoolInfoProto info = 2;
}
message PathCacheDirectiveProto { message PathCacheDirectiveProto {
required string path = 1; required string path = 1;
required CachePoolProto pool = 2; required string pool = 2;
}
message PathCacheEntryProto {
required int64 id = 1;
optional PathCacheDirectiveProto directive = 2;
} }
message AddPathCacheDirectivesRequestProto { message AddPathCacheDirectivesRequestProto {
@ -417,42 +399,52 @@ enum RemovePathCacheEntryErrorProto {
} }
message ListPathCacheEntriesRequestProto { message ListPathCacheEntriesRequestProto {
required PathCacheEntryProto prevEntry = 1; required int64 prevId = 1;
required CachePoolProto pool = 2; required string pool = 2;
optional int32 maxReplies = 3; }
message ListPathCacheEntriesElementProto {
required int64 id = 1;
required string path = 2;
required string pool = 3;
} }
message ListPathCacheEntriesResponseProto { message ListPathCacheEntriesResponseProto {
repeated PathCacheEntryProto entries = 1; repeated ListPathCacheEntriesElementProto elements = 1;
required bool hasMore = 2; required bool hasMore = 2;
} }
message AddCachePoolRequestProto { message AddCachePoolRequestProto {
required CachePoolInfoProto info = 1; required string poolName = 1;
optional string ownerName = 2;
optional string groupName = 3;
optional int32 mode = 4;
optional int32 weight = 5;
} }
message AddCachePoolResponseProto { message AddCachePoolResponseProto { // void response
required CachePoolProto pool = 1;
} }
message ModifyCachePoolRequestProto { message ModifyCachePoolRequestProto {
required CachePoolProto pool = 1; required string poolName = 1;
required CachePoolInfoProto info = 2; optional string ownerName = 2;
optional string groupName = 3;
optional int32 mode = 4;
optional int32 weight = 5;
} }
message ModifyCachePoolResponseProto { // void response message ModifyCachePoolResponseProto { // void response
} }
message RemoveCachePoolRequestProto { message RemoveCachePoolRequestProto {
required CachePoolProto pool = 1; required string poolName = 1;
} }
message RemoveCachePoolResponseProto { // void response message RemoveCachePoolResponseProto { // void response
} }
message ListCachePoolsRequestProto { message ListCachePoolsRequestProto {
required CachePoolProto prevPool = 1; required string prevPoolName = 1;
required int32 maxReplies = 2;
} }
message ListCachePoolsResponseProto { message ListCachePoolsResponseProto {
@ -461,7 +453,11 @@ message ListCachePoolsResponseProto {
} }
message ListCachePoolsResponseElementProto { message ListCachePoolsResponseElementProto {
required CachePoolProto pool = 1; required string poolName = 1;
required string ownerName = 2;
required string groupName = 3;
required int32 mode = 4;
required int32 weight = 5;
} }
message GetFileLinkInfoRequestProto { message GetFileLinkInfoRequestProto {
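A sketch of a request built against the flattened message layout above, assuming the usual protoc-generated Java builder API for these messages; optional fields such as mode and weight can simply be left unset:

import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;

public class ProtoSketch {
  // Only poolName is required; the remaining fields are optional.
  static AddCachePoolRequestProto newRequest() {
    return AddCachePoolRequestProto.newBuilder()
        .setPoolName("pool1")
        .setOwnerName("bob")
        .setGroupName("bobgroup")
        .setMode(0755)
        .setWeight(150)
        .build();
  }
}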
View File
@ -17,7 +17,7 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertEquals; import static org.junit.Assert.*;
import java.io.IOException; import java.io.IOException;
import java.security.PrivilegedExceptionAction; import java.security.PrivilegedExceptionAction;
@ -31,64 +31,58 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.Fallible;
import org.junit.After;
import org.junit.Before;
import org.junit.Test; import org.junit.Test;
public class TestPathCacheRequests { public class TestPathCacheRequests {
static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class); static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class);
private static Configuration conf = new HdfsConfiguration(); private static final UserGroupInformation unprivilegedUser =
private static MiniDFSCluster cluster = null; UserGroupInformation.createRemoteUser("unprivilegedUser");
private static NamenodeProtocols proto = null;
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
proto = cluster.getNameNodeRpc();
}
@After
public void tearDown() throws Exception {
if (cluster != null) {
cluster.shutdown();
}
}
@Test @Test
public void testCreateAndRemovePools() throws Exception { public void testCreateAndRemovePools() throws Exception {
CachePoolInfo req = Configuration conf = new HdfsConfiguration();
CachePoolInfo.newBuilder().setPoolName("pool1").setOwnerName("bob") MiniDFSCluster cluster = null;
.setGroupName("bobgroup").setMode(new FsPermission((short) 0755))
.setWeight(150).build(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
CachePool pool = proto.addCachePool(req); cluster.waitActive();
NamenodeProtocols proto = cluster.getNameNodeRpc();
CachePoolInfo req = new CachePoolInfo("pool1").
setOwnerName("bob").setGroupName("bobgroup").
setMode(new FsPermission((short)0755)).setWeight(150);
proto.addCachePool(req);
try { try {
proto.removeCachePool(909); proto.removeCachePool("pool99");
Assert.fail("expected to get an exception when " + Assert.fail("expected to get an exception when " +
"removing a non-existent pool."); "removing a non-existent pool.");
} catch (IOException ioe) { } catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("can't remove " +
"nonexistent cache pool", ioe);
} }
proto.removeCachePool(pool.getId()); proto.removeCachePool("pool1");
try { try {
proto.removeCachePool(pool.getId()); proto.removeCachePool("pool1");
Assert.fail("expected to get an exception when " + Assert.fail("expected to get an exception when " +
"removing a non-existent pool."); "removing a non-existent pool.");
} catch (IOException ioe) { } catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("can't remove " +
"nonexistent cache pool", ioe);
} }
req = new CachePoolInfo("pool2"); req = new CachePoolInfo("pool2");
proto.addCachePool(req); proto.addCachePool(req);
@ -96,42 +90,36 @@ public class TestPathCacheRequests {
   @Test
   public void testCreateAndModifyPools() throws Exception {
-    // Create a new pool
-    CachePoolInfo info = CachePoolInfo.newBuilder().
-        setPoolName("pool1").
-        setOwnerName("abc").
-        setGroupName("123").
-        setMode(new FsPermission((short)0755)).
-        setWeight(150).
-        build();
-    CachePool pool = proto.addCachePool(info);
-    CachePoolInfo actualInfo = pool.getInfo();
-    assertEquals("Expected info to match create time settings",
-        info, actualInfo);
-    // Modify the pool
-    info = CachePoolInfo.newBuilder().
-        setPoolName("pool2").
-        setOwnerName("def").
-        setGroupName("456").
-        setMode(new FsPermission((short)0644)).
-        setWeight(200).
-        build();
-    proto.modifyCachePool(pool.getId(), info);
-    // Check via listing this time
-    RemoteIterator<CachePool> iter = proto.listCachePools(0, 1);
-    CachePool listedPool = iter.next();
-    actualInfo = listedPool.getInfo();
-    assertEquals("Expected info to match modified settings", info, actualInfo);
+    Configuration conf = new HdfsConfiguration();
+    MiniDFSCluster cluster = null;
+    // set low limits here for testing purposes
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+    cluster.waitActive();
+    NamenodeProtocols proto = cluster.getNameNodeRpc();
+    proto.addCachePool(new CachePoolInfo("pool1").
+        setOwnerName("abc").setGroupName("123").
+        setMode(new FsPermission((short)0755)).setWeight(150));
+    proto.modifyCachePool(new CachePoolInfo("pool1").
+        setOwnerName("def").setGroupName("456"));
+    RemoteIterator<CachePoolInfo> iter = proto.listCachePools("");
+    CachePoolInfo info = iter.next();
+    assertEquals("pool1", info.getPoolName());
+    assertEquals("def", info.getOwnerName());
+    assertEquals("456", info.getGroupName());
+    assertEquals(new FsPermission((short)0755), info.getMode());
+    assertEquals(Integer.valueOf(150), info.getWeight());
     try {
-      proto.removeCachePool(808);
+      proto.removeCachePool("pool99");
       Assert.fail("expected to get an exception when " +
           "removing a non-existent pool.");
     } catch (IOException ioe) {
     }
-    proto.removeCachePool(pool.getId());
+    proto.removeCachePool("pool1");
     try {
-      proto.removeCachePool(pool.getId());
+      proto.removeCachePool("pool1");
       Assert.fail("expected to get an exception when " +
           "removing a non-existent pool.");
     } catch (IOException ioe) {
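The DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES and DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES keys set above cap how many items the NameNode returns per listing RPC; the caller still consumes a single RemoteIterator. A small consumer sketch under that assumption, with the counting helper being illustrative only:

  // Sketch only: drains listCachePools(); the per-response limit configured
  // above only affects how many pools arrive per RPC batch, not the caller.
  static int countCachePools(NamenodeProtocols proto) throws IOException {
    RemoteIterator<CachePoolInfo> it = proto.listCachePools("");
    int count = 0;
    while (it.hasNext()) {
      it.next();
      count++;
    }
    return count;
  }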
@@ -142,13 +130,13 @@ public class TestPathCacheRequests {
       RemoteIterator<PathCacheEntry> iter,
       long id0, long id1, long id2) throws Exception {
     Assert.assertEquals(new PathCacheEntry(id0,
-        new PathCacheDirective("/alpha", 1)),
+        new PathCacheDirective("/alpha", "pool1")),
         iter.next());
     Assert.assertEquals(new PathCacheEntry(id1,
-        new PathCacheDirective("/beta", 2)),
+        new PathCacheDirective("/beta", "pool2")),
         iter.next());
     Assert.assertEquals(new PathCacheEntry(id2,
-        new PathCacheDirective("/gamma", 1)),
+        new PathCacheDirective("/gamma", "pool1")),
         iter.next());
     Assert.assertFalse(iter.hasNext());
   }
@@ -161,36 +149,34 @@ public class TestPathCacheRequests {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
       cluster.waitActive();
-      final CachePool pool1 = proto.addCachePool(new CachePoolInfo("pool1"));
-      final CachePool pool2 = proto.addCachePool(new CachePoolInfo("pool2"));
-      final CachePool pool3 = proto.addCachePool(new CachePoolInfo("pool3"));
-      final CachePool pool4 = proto.addCachePool(CachePoolInfo.newBuilder()
-          .setPoolName("pool4")
-          .setMode(new FsPermission((short)0)).build());
-      UserGroupInformation testUgi = UserGroupInformation
-          .createUserForTesting("myuser", new String[]{"mygroup"});
-      List<Fallible<PathCacheEntry>> addResults1 = testUgi.doAs(
-          new PrivilegedExceptionAction<List<Fallible<PathCacheEntry>>>() {
-            @Override
-            public List<Fallible<PathCacheEntry>> run() throws IOException {
-              List<Fallible<PathCacheEntry>> entries;
-              entries = proto.addPathCacheDirectives(
-                  Arrays.asList(new PathCacheDirective[] {
-                    new PathCacheDirective("/alpha", pool1.getId()),
-                    new PathCacheDirective("/beta", pool2.getId()),
-                    new PathCacheDirective("", pool3.getId()),
-                    new PathCacheDirective("/zeta", 404),
-                    new PathCacheDirective("/zeta", pool4.getId())
-                  }));
-              return entries;
+      final NamenodeProtocols proto = cluster.getNameNodeRpc();
+      proto.addCachePool(new CachePoolInfo("pool1").
+          setMode(new FsPermission((short)0777)));
+      proto.addCachePool(new CachePoolInfo("pool2").
+          setMode(new FsPermission((short)0777)));
+      proto.addCachePool(new CachePoolInfo("pool3").
+          setMode(new FsPermission((short)0777)));
+      proto.addCachePool(new CachePoolInfo("pool4").
+          setMode(new FsPermission((short)0)));
+      List<Fallible<PathCacheEntry>> addResults1 =
+          unprivilegedUser.doAs(new PrivilegedExceptionAction<
+              List<Fallible<PathCacheEntry>>>() {
+            @Override
+            public List<Fallible<PathCacheEntry>> run() throws IOException {
+              return proto.addPathCacheDirectives(Arrays.asList(
+                  new PathCacheDirective[] {
+                    new PathCacheDirective("/alpha", "pool1"),
+                    new PathCacheDirective("/beta", "pool2"),
+                    new PathCacheDirective("", "pool3"),
+                    new PathCacheDirective("/zeta", "nonexistent_pool"),
+                    new PathCacheDirective("/zeta", "pool4")
+                  }));
             }
           });
-      // Save the successful additions
       long ids1[] = new long[2];
-      for (int i=0; i<2; i++) {
-        ids1[i] = addResults1.get(i).get().getEntryId();
-      }
-      // Verify that the unsuccessful additions failed properly
+      ids1[0] = addResults1.get(0).get().getEntryId();
+      ids1[1] = addResults1.get(1).get().getEntryId();
       try {
         addResults1.get(2).get();
         Assert.fail("expected an error when adding an empty path");
@@ -201,7 +187,7 @@ public class TestPathCacheRequests {
         addResults1.get(3).get();
         Assert.fail("expected an error when adding to a nonexistent pool.");
       } catch (IOException ioe) {
-        Assert.assertTrue(ioe.getCause() instanceof InvalidPoolError);
+        Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError);
       }
       try {
         addResults1.get(4).get();
@@ -215,10 +201,10 @@ public class TestPathCacheRequests {
       List<Fallible<PathCacheEntry>> addResults2 =
           proto.addPathCacheDirectives(Arrays.asList(
             new PathCacheDirective[] {
-              new PathCacheDirective("/alpha", pool1.getId()),
-              new PathCacheDirective("/theta", 404),
-              new PathCacheDirective("bogus", pool1.getId()),
-              new PathCacheDirective("/gamma", pool1.getId())
+              new PathCacheDirective("/alpha", "pool1"),
+              new PathCacheDirective("/theta", ""),
+              new PathCacheDirective("bogus", "pool1"),
+              new PathCacheDirective("/gamma", "pool1")
             }));
       long id = addResults2.get(0).get().getEntryId();
       Assert.assertEquals("expected to get back the same ID as last time " +
@@ -228,7 +214,7 @@ public class TestPathCacheRequests {
         Assert.fail("expected an error when adding a path cache " +
             "directive with an empty pool name.");
       } catch (IOException ioe) {
-        Assert.assertTrue(ioe.getCause() instanceof InvalidPoolError);
+        Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError);
       }
       try {
         addResults2.get(2).get();
@@ -240,16 +226,14 @@ public class TestPathCacheRequests {
       long ids2[] = new long[1];
       ids2[0] = addResults2.get(3).get().getEntryId();
-      // Validate listing all entries
       RemoteIterator<PathCacheEntry> iter =
-          proto.listPathCacheEntries(-1l, -1l, 100);
+          proto.listPathCacheEntries(0, "");
       validateListAll(iter, ids1[0], ids1[1], ids2[0]);
-      iter = proto.listPathCacheEntries(-1l, -1l, 1);
+      iter = proto.listPathCacheEntries(0, "");
       validateListAll(iter, ids1[0], ids1[1], ids2[0]);
-      // Validate listing certain pools
-      iter = proto.listPathCacheEntries(0, pool3.getId(), 1);
+      iter = proto.listPathCacheEntries(0, "pool3");
       Assert.assertFalse(iter.hasNext());
-      iter = proto.listPathCacheEntries(0, pool2.getId(), 4444);
+      iter = proto.listPathCacheEntries(0, "pool2");
       Assert.assertEquals(addResults1.get(1).get(),
           iter.next());
       Assert.assertFalse(iter.hasNext());
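listPathCacheEntries now takes a starting entry id and a pool-name filter instead of a numeric pool id and a reply limit, as the hunk above shows. An illustrative sketch of adding one directive and then listing a single pool under those assumptions; the path and pool names are hypothetical:

  // Sketch only: one directive into a named pool, then a per-pool listing.
  static void listOnePoolSketch(NamenodeProtocols proto) throws IOException {
    proto.addPathCacheDirectives(Arrays.asList(new PathCacheDirective[] {
        new PathCacheDirective("/example", "examplePool") }));
    // 0 = start from the beginning; "examplePool" filters by pool name
    RemoteIterator<PathCacheEntry> it =
        proto.listPathCacheEntries(0, "examplePool");
    while (it.hasNext()) {
      PathCacheEntry entry = it.next();
      // entry carries the server-assigned id plus the original directive
    }
  }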
@@ -271,7 +255,7 @@ public class TestPathCacheRequests {
       } catch (IOException ioe) {
         Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException);
       }
-      iter = proto.listPathCacheEntries(0, pool2.getId(), 4444);
+      iter = proto.listPathCacheEntries(0, "pool2");
       Assert.assertFalse(iter.hasNext());
     } finally {
       if (cluster != null) { cluster.shutdown(); }