HDFS-5163. Miscellaneous cache pool RPC fixes (Contributed by Colin Patrick McCabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1520665 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 2013-09-06 18:52:50 +00:00
parent d56d0b46e1
commit f41f8b8842
19 changed files with 788 additions and 797 deletions

org/apache/hadoop/fs/BatchedRemoteIterator.java

@@ -28,13 +28,16 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
public interface BatchedEntries<E> {
public E get(int i);
public int size();
public boolean hasMore();
}
public static class BatchedListEntries<E> implements BatchedEntries<E> {
private final List<E> entries;
private final boolean hasMore;
public BatchedListEntries(List<E> entries) {
public BatchedListEntries(List<E> entries, boolean hasMore) {
this.entries = entries;
this.hasMore = hasMore;
}
public E get(int i) {
@@ -44,16 +47,18 @@ public E get(int i) {
public int size() {
return entries.size();
}
public boolean hasMore() {
return hasMore;
}
}
private K prevKey;
private final int maxRepliesPerRequest;
private BatchedEntries<E> entries;
private int idx;
public BatchedRemoteIterator(K prevKey, int maxRepliesPerRequest) {
public BatchedRemoteIterator(K prevKey) {
this.prevKey = prevKey;
this.maxRepliesPerRequest = maxRepliesPerRequest;
this.entries = null;
this.idx = -1;
}
@@ -62,21 +67,14 @@ public BatchedRemoteIterator(K prevKey, int maxRepliesPerRequest) {
* Perform the actual remote request.
*
* @param key The key to send.
* @param maxRepliesPerRequest The maximum number of replies to allow.
* @return A list of replies.
*/
public abstract BatchedEntries<E> makeRequest(K prevKey,
int maxRepliesPerRequest) throws IOException;
public abstract BatchedEntries<E> makeRequest(K prevKey) throws IOException;
private void makeRequest() throws IOException {
idx = 0;
entries = null;
entries = makeRequest(prevKey, maxRepliesPerRequest);
if (entries.size() > maxRepliesPerRequest) {
throw new IOException("invalid number of replies returned: got " +
entries.size() + ", expected " + maxRepliesPerRequest +
" at most.");
}
entries = makeRequest(prevKey);
if (entries.size() == 0) {
entries = null;
}
@@ -86,7 +84,7 @@ private void makeRequestIfNeeded() throws IOException {
if (idx == -1) {
makeRequest();
} else if ((entries != null) && (idx >= entries.size())) {
if (entries.size() < maxRepliesPerRequest) {
if (!entries.hasMore()) {
// Last time, we got fewer entries than requested.
// So we should be at the end.
entries = null;
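
The BatchedRemoteIterator change above is the core of the patch: instead of the client picking a maxRepliesPerRequest and inferring the end of the listing from a short batch, the server now chooses the batch size and reports through hasMore() whether another round trip is worthwhile. A minimal sketch of a subclass under the new contract; IdIterator and its in-memory "server" are hypothetical, not part of this commit:

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.BatchedRemoteIterator;

// Hypothetical subclass: pages through ids 1..total in batches whose
// size is chosen by the (simulated) server, not by the caller.
public class IdIterator extends BatchedRemoteIterator<Long, Long> {
  private final long total;      // simulated size of the remote data set
  private final int serverBatch; // simulated per-response server limit

  public IdIterator(long total, int serverBatch) {
    super(0L); // prevKey starts just before the first id
    this.total = total;
    this.serverBatch = serverBatch;
  }

  @Override
  public BatchedEntries<Long> makeRequest(Long prevKey) throws IOException {
    List<Long> batch = new ArrayList<Long>();
    for (long id = prevKey + 1; id <= total && batch.size() < serverBatch; id++) {
      batch.add(id);
    }
    // hasMore is true only when the server truncated the response, so
    // the iterator no longer has to guess from the batch size.
    boolean hasMore = !batch.isEmpty() && batch.get(batch.size() - 1) < total;
    return new BatchedListEntries<Long>(batch, hasMore);
  }

  @Override
  public Long elementToPrevKey(Long element) {
    return element;
  }
}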

org/apache/hadoop/fs/permission/FsPermission.java

@@ -303,6 +303,13 @@ public static FsPermission getFileDefault() {
return new FsPermission((short)00666);
}
/**
* Get the default permission for cache pools.
*/
public static FsPermission getCachePoolDefault() {
return new FsPermission((short)00755);
}
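
The 00755 default parallels the directory default above it: per the CachePoolInfo javadoc later in this commit, write permission on a pool governs adding and removing entries while execute governs listing them, so the default lets anyone list a pool but only its owner modify it. A small sketch (class name hypothetical):

import org.apache.hadoop.fs.permission.FsPermission;

public class PoolModeExample {
  public static void main(String[] args) {
    // 00755 == rwxr-xr-x: the owner may add, remove, and list entries;
    // group and others may only list them.
    FsPermission mode = FsPermission.getCachePoolDefault();
    System.out.println(mode); // prints rwxr-xr-x
  }
}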
/**
* Create a FsPermission from a Unix symbolic permission string
* @param unixSymbolicPermission e.g. "-rw-rw-rw-"

org/apache/hadoop/security/UserGroupInformation.java

@@ -1253,6 +1253,14 @@ public String getShortUserName() {
return null;
}
public String getPrimaryGroupName() throws IOException {
String[] groups = getGroupNames();
if (groups.length == 0) {
throw new IOException("There is no primary group for UGI " + this);
}
return groups[0];
}
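
The new helper hands callers a single default group, throwing instead of returning null when the user has no groups. A hypothetical usage sketch:

import java.io.IOException;

import org.apache.hadoop.security.UserGroupInformation;

public class PrimaryGroupExample {
  public static void main(String[] args) throws IOException {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // First element of getGroupNames(); throws IOException when the
    // user belongs to no groups at all.
    System.out.println(ugi.getPrimaryGroupName());
  }
}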
/**
* Get the user's full principal name.
* @return the user's full principal name.

CHANGES-HDFS-4949.txt

@@ -24,6 +24,9 @@ HDFS-4949 (Unreleased)
HDFS-5121. Add RPCs for creating and manipulating cache pools.
(Contributed by Colin Patrick McCabe)
HDFS-5163. Miscellaneous cache pool RPC fixes. (Contributed by Colin
Patrick McCabe)
OPTIMIZATIONS

org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -195,6 +195,13 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY = "dfs.datanode.socket.reuse.keepalive";
public static final int DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_DEFAULT = 1000;
public static final String DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES =
"dfs.namenode.list.cache.pools.num.responses";
public static final int DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT = 100;
public static final String DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES =
"dfs.namenode.list.cache.directives.num.responses";
public static final int DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT = 100;
// Whether to enable datanode's stale state detection and usage for reads
public static final String DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_KEY = "dfs.namenode.avoid.read.stale.datanode";
public static final boolean DFS_NAMENODE_AVOID_STALE_DATANODE_FOR_READ_DEFAULT = false;
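
Both new keys cap how many items the NameNode returns from a single listing RPC, 100 apiece by default; clients page past the cap transparently through the hasMore-driven iterator shown earlier. A hedged configuration sketch:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class ListingLimits {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Lower both per-RPC batch caps from their 100-entry defaults.
    conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 10);
    conf.setInt(
        DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 10);
  }
}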

org/apache/hadoop/hdfs/protocol/AddPathCacheDirectiveException.java

@@ -56,12 +56,12 @@ public InvalidPathNameError(PathCacheDirective directive) {
}
}
public static class InvalidPoolError
public static class InvalidPoolNameError
extends AddPathCacheDirectiveException {
private static final long serialVersionUID = 1L;
public InvalidPoolError(PathCacheDirective directive) {
super("invalid pool id " + directive.getPoolId(), directive);
public InvalidPoolNameError(PathCacheDirective directive) {
super("invalid pool name '" + directive.getPool() + "'", directive);
}
}
@@ -70,7 +70,7 @@ public static class PoolWritePermissionDeniedError
private static final long serialVersionUID = 1L;
public PoolWritePermissionDeniedError(PathCacheDirective directive) {
super("write permission denied for pool id " + directive.getPoolId(),
super("write permission denied for pool '" + directive.getPool() + "'",
directive);
}
}
@@ -82,9 +82,7 @@ public static class UnexpectedAddPathCacheDirectiveException
public UnexpectedAddPathCacheDirectiveException(
PathCacheDirective directive) {
super("encountered an unexpected error when trying to " +
"add path cache directive to pool id " + directive.getPoolId() +
" " + directive,
directive);
"add path cache directive " + directive, directive);
}
}
};
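
With pools addressed by name rather than numeric id, the renamed InvalidPoolNameError and its siblings carry the offending pool name in their messages. A sketch of unwrapping the per-directive Fallible results returned by addPathCacheDirectives, assuming Fallible.get() rethrows the stored IOException (the report helper is hypothetical):

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.util.Fallible;

public class DirectiveErrors {
  // Hypothetical helper: unwrap each Fallible, surfacing name-based errors.
  static void report(List<Fallible<PathCacheEntry>> results) {
    for (Fallible<PathCacheEntry> result : results) {
      try {
        System.out.println("cached: " + result.get());
      } catch (InvalidPoolNameError e) {
        System.err.println(e.getMessage()); // e.g. "invalid pool name '...'"
      } catch (IOException e) {
        System.err.println(e.getMessage());
      }
    }
  }
}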

org/apache/hadoop/hdfs/protocol/CachePoolInfo.java

@@ -18,45 +18,38 @@
package org.apache.hadoop.hdfs.protocol;
import javax.annotation.Nullable;
import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.permission.FsPermission;
import com.google.common.base.Preconditions;
/**
* Information about a cache pool.
*
* CachePoolInfo permissions roughly map to Unix file permissions.
* Write permissions allow addition and removal of a {@link PathCacheEntry} from
* the pool. Execute permissions allow listing of PathCacheEntries in a pool.
* Read permissions have no associated meaning.
*/
@InterfaceAudience.Private
@InterfaceStability.Evolving
public class CachePoolInfo {
final String poolName;
private String poolName;
private String ownerName;
private String groupName;
private FsPermission mode;
private Integer weight;
@Nullable
String ownerName;
/**
* For Builder use
*/
private CachePoolInfo() {}
@Nullable
String groupName;
@Nullable
FsPermission mode;
@Nullable
Integer weight;
/**
* Use a CachePoolInfo {@link Builder} to create a new CachePoolInfo with
* more parameters
*/
public CachePoolInfo(String poolName) {
this.poolName = poolName;
}
public String getPoolName() {
return poolName;
}
@@ -65,103 +58,73 @@ public String getOwnerName() {
return ownerName;
}
public CachePoolInfo setOwnerName(String ownerName) {
this.ownerName = ownerName;
return this;
}
public String getGroupName() {
return groupName;
}
public CachePoolInfo setGroupName(String groupName) {
this.groupName = groupName;
return this;
}
public FsPermission getMode() {
return mode;
}
public CachePoolInfo setMode(FsPermission mode) {
this.mode = mode;
return this;
}
public Integer getWeight() {
return weight;
}
public CachePoolInfo setWeight(Integer weight) {
this.weight = weight;
return this;
}
public String toString() {
return new StringBuilder().
append("{ ").append("poolName:").append(poolName).
append(", ownerName:").append(ownerName).
append(", groupName:").append(groupName).
append(", mode:").append(mode).
append(", weight:").append(weight).
append(" }").toString();
return new StringBuilder().append("{").
append("poolName:").append(poolName).
append(", ownerName:").append(ownerName).
append(", groupName:").append(groupName).
append(", mode:").append((mode == null) ? "null" :
String.format("0%03o", mode)).
append(", weight:").append(weight).
append("}").toString();
}
@Override
public boolean equals(Object o) {
try {
CachePoolInfo other = (CachePoolInfo)o;
return new EqualsBuilder().
append(poolName, other.poolName).
append(ownerName, other.ownerName).
append(groupName, other.groupName).
append(mode, other.mode).
append(weight, other.weight).
isEquals();
} catch (ClassCastException e) {
return false;
}
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(poolName).append(ownerName)
.append(groupName).append(mode.toShort()).append(weight).hashCode();
return new HashCodeBuilder().
append(poolName).
append(ownerName).
append(groupName).
append(mode).
append(weight).
hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == null) { return false; }
if (obj == this) { return true; }
if (obj.getClass() != getClass()) {
return false;
}
CachePoolInfo rhs = (CachePoolInfo)obj;
return new EqualsBuilder()
.append(poolName, rhs.poolName)
.append(ownerName, rhs.ownerName)
.append(groupName, rhs.groupName)
.append(mode, rhs.mode)
.append(weight, rhs.weight)
.isEquals();
}
public static Builder newBuilder() {
return new Builder();
}
public static Builder newBuilder(CachePoolInfo info) {
return new Builder(info);
}
/**
* CachePoolInfo Builder
*/
public static class Builder {
private CachePoolInfo info;
public Builder() {
this.info = new CachePoolInfo();
}
public Builder(CachePoolInfo info) {
this.info = info;
}
public CachePoolInfo build() {
Preconditions.checkNotNull(info.poolName,
"Cannot create a CachePoolInfo without a pool name");
return info;
}
public Builder setPoolName(String poolName) {
info.poolName = poolName;
return this;
}
public Builder setOwnerName(String ownerName) {
info.ownerName = ownerName;
return this;
}
public Builder setGroupName(String groupName) {
info.groupName = groupName;
return this;
}
public Builder setMode(FsPermission mode) {
info.mode = mode;
return this;
}
public Builder setWeight(Integer weight) {
info.weight = weight;
return this;
}
}
}
}
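
With the Builder removed, CachePoolInfo is a plain mutable bean whose setters return this, so requests chain directly; fields left null mean "use a default" to addCachePool and "leave unchanged" to modifyCachePool. A sketch, not from the patch:

import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class PoolInfoExample {
  public static void main(String[] args) {
    CachePoolInfo info = new CachePoolInfo("research")
        .setOwnerName("alice")
        .setGroupName("datascience")
        .setMode(new FsPermission((short) 0750))
        .setWeight(100);
    System.out.println(info);
  }
}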

org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -38,7 +38,6 @@
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
@@ -1107,8 +1106,9 @@ public SnapshotDiffReport getSnapshotDiffReport(String snapshotRoot,
* could not be added.
*/
@AtMostOnce
public List<Fallible<PathCacheEntry>> addPathCacheDirectives(
List<PathCacheDirective> directives) throws IOException;
public List<Fallible<PathCacheEntry>>
addPathCacheDirectives(List<PathCacheDirective> directives)
throws IOException;
/**
* Remove some path cache entries from the CacheManager.
@@ -1117,7 +1117,7 @@ public List<Fallible<PathCacheEntry>> addPathCacheDirectives(
* @return A Fallible list where each element is either a successfully removed
* ID, or an IOException describing why the ID could not be removed.
*/
@Idempotent
@AtMostOnce
public List<Fallible<Long>> removePathCacheEntries(List<Long> ids)
throws IOException;
@@ -1127,15 +1127,13 @@ public List<Fallible<Long>> removePathCacheEntries(List<Long> ids)
*
* @param prevId The last listed entry ID, or -1 if this is the first call to
* listPathCacheEntries.
* @param pool The cache pool to list, or -1 to list all pools
* @param maxRepliesPerRequest The maximum number of entries to return per
* request
* @param pool The cache pool to list, or the empty string to list all pools
* @return A RemoteIterator which returns PathCacheEntry objects.
*/
@Idempotent
public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
long poolId, int maxRepliesPerRequest) throws IOException;
String pool) throws IOException;
/**
* Add a new cache pool.
*
@@ -1143,39 +1141,37 @@ public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
* @throws IOException If the request could not be completed.
*/
@AtMostOnce
public CachePool addCachePool(CachePoolInfo info) throws IOException;
public void addCachePool(CachePoolInfo info) throws IOException;
/**
* Modify a cache pool, e.g. pool name, permissions, owner, group.
*
* @param poolId ID of the cache pool to modify
* @param info New metadata for the cache pool
* @throws IOException If the request could not be completed.
* Modify a cache pool.
*
* @param req
* The request to modify a cache pool.
* @throws IOException
* If the request could not be completed.
*/
@AtMostOnce
public void modifyCachePool(long poolId, CachePoolInfo info)
throws IOException;
public void modifyCachePool(CachePoolInfo req) throws IOException;
/**
* Remove a cache pool.
*
* @param poolId ID of the cache pool to remove.
* @param pool name of the cache pool to remove.
* @throws IOException if the cache pool did not exist, or could not be
* removed.
*/
@Idempotent
public void removeCachePool(long poolId) throws IOException;
@AtMostOnce
public void removeCachePool(String pool) throws IOException;
/**
* List the set of cache pools. Incrementally fetches results from the server.
*
* @param prevPoolId ID of the last pool listed, or -1 if this is the first
* invocation of listCachePools
* @param maxRepliesPerRequest Maximum number of cache pools to return per
* server request.
* @param prevPool name of the last pool listed, or the empty string if this is
* the first invocation of listCachePools
* @return A RemoteIterator which returns CachePool objects.
*/
@Idempotent
public RemoteIterator<CachePool> listCachePools(long prevPoolId,
int maxRepliesPerRequest) throws IOException;
public RemoteIterator<CachePoolInfo> listCachePools(String prevPool)
throws IOException;
}
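
Listing is now cursor-based on names: the client passes the last name it saw (or the empty string to start) and the server decides how much to send back. A hedged client-side sketch of draining the pool listing, assuming only a ClientProtocol handle (the printAll helper is hypothetical):

import java.io.IOException;

import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class ListPools {
  // Hypothetical helper: print every pool name, starting before the
  // lexicographically first pool.
  static void printAll(ClientProtocol namenode) throws IOException {
    RemoteIterator<CachePoolInfo> it = namenode.listCachePools("");
    while (it.hasNext()) {
      System.out.println(it.next().getPoolName());
    }
  }
}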

org/apache/hadoop/hdfs/protocol/PathCacheDirective.java

@@ -25,7 +25,7 @@
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
/**
@@ -33,13 +33,14 @@
*/
public class PathCacheDirective implements Comparable<PathCacheDirective> {
private final String path;
private final long poolId;
public PathCacheDirective(String path, long poolId) {
private final String pool;
public PathCacheDirective(String path, String pool) {
Preconditions.checkNotNull(path);
Preconditions.checkArgument(poolId > 0);
Preconditions.checkNotNull(pool);
this.path = path;
this.poolId = poolId;
this.pool = pool;
}
/**
@@ -52,8 +53,8 @@ public String getPath() {
/**
* @return The pool used in this request.
*/
public long getPoolId() {
return poolId;
public String getPool() {
return pool;
}
/**
@@ -69,22 +70,22 @@ public void validate() throws IOException {
if (!DFSUtil.isValidName(path)) {
throw new InvalidPathNameError(this);
}
if (poolId <= 0) {
throw new InvalidPoolError(this);
if (pool.isEmpty()) {
throw new InvalidPoolNameError(this);
}
}
@Override
public int compareTo(PathCacheDirective rhs) {
return ComparisonChain.start().
compare(poolId, rhs.getPoolId()).
compare(pool, rhs.getPool()).
compare(path, rhs.getPath()).
result();
}
@Override
public int hashCode() {
return new HashCodeBuilder().append(path).append(poolId).hashCode();
return new HashCodeBuilder().append(path).append(pool).hashCode();
}
@Override
@@ -101,7 +102,7 @@ public boolean equals(Object o) {
public String toString() {
StringBuilder builder = new StringBuilder();
builder.append("{ path:").append(path).
append(", poolId:").append(poolId).
append(", pool:").append(pool).
append(" }");
return builder.toString();
}
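
A directive now validates its pool name (non-null and non-empty) instead of a positive pool id, and sorts by pool first, then path. A short sketch with hypothetical values:

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.PathCacheDirective;

public class DirectiveExample {
  public static void main(String[] args) throws IOException {
    PathCacheDirective d = new PathCacheDirective("/warm/data", "research");
    d.validate(); // throws InvalidPoolNameError if the pool name is ""
    System.out.println(d); // { path:/warm/data, pool:research }
  }
}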

org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -27,9 +27,11 @@
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -112,6 +114,7 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -171,6 +174,7 @@
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
@@ -1035,19 +1039,16 @@ public IsFileClosedResponseProto isFileClosed(
}
@Override
public AddPathCacheDirectivesResponseProto addPathCacheDirectives(
RpcController controller, AddPathCacheDirectivesRequestProto request)
throws ServiceException {
public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController controller,
AddPathCacheDirectivesRequestProto request) throws ServiceException {
try {
ArrayList<PathCacheDirective> input =
new ArrayList<PathCacheDirective>(request.getElementsCount());
for (int i = 0; i < request.getElementsCount(); i++) {
PathCacheDirectiveProto proto = request.getElements(i);
input.add(new PathCacheDirective(proto.getPath(),
proto.getPool().getId()));
input.add(new PathCacheDirective(proto.getPath(), proto.getPool()));
}
List<Fallible<PathCacheEntry>> output = server
.addPathCacheDirectives(input);
List<Fallible<PathCacheEntry>> output = server.addPathCacheDirectives(input);
AddPathCacheDirectivesResponseProto.Builder builder =
AddPathCacheDirectivesResponseProto.newBuilder();
for (int idx = 0; idx < output.size(); idx++) {
@@ -1060,7 +1061,7 @@ public AddPathCacheDirectivesResponseProto addPathCacheDirectives(
} catch (InvalidPathNameError ioe) {
builder.addResults(AddPathCacheDirectiveErrorProto.
INVALID_PATH_NAME_ERROR_VALUE);
} catch (InvalidPoolError ioe) {
} catch (InvalidPoolNameError ioe) {
builder.addResults(AddPathCacheDirectiveErrorProto.
INVALID_POOL_NAME_ERROR_VALUE);
} catch (IOException ioe) {
@@ -1108,21 +1109,20 @@ public RemovePathCacheEntriesResponseProto removePathCacheEntries(
}
@Override
public ListPathCacheEntriesResponseProto listPathCacheEntries(
RpcController controller, ListPathCacheEntriesRequestProto request)
throws ServiceException {
public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController controller,
ListPathCacheEntriesRequestProto request) throws ServiceException {
try {
CachePool pool = PBHelper.convert(request.getPool());
RemoteIterator<PathCacheEntry> iter =
server.listPathCacheEntries(
PBHelper.convert(request.getPrevEntry()).getEntryId(),
pool.getId(),
request.getMaxReplies());
server.listPathCacheEntries(request.getPrevId(), request.getPool());
ListPathCacheEntriesResponseProto.Builder builder =
ListPathCacheEntriesResponseProto.newBuilder();
while (iter.hasNext()) {
PathCacheEntry entry = iter.next();
builder.addEntries(PBHelper.convert(entry));
builder.addElements(
ListPathCacheEntriesElementProto.newBuilder().
setId(entry.getEntryId()).
setPath(entry.getDirective().getPath()).
setPool(entry.getDirective().getPool()));
}
return builder.build();
} catch (IOException e) {
@@ -1134,20 +1134,46 @@ public ListPathCacheEntriesResponseProto listPathCacheEntries(
public AddCachePoolResponseProto addCachePool(RpcController controller,
AddCachePoolRequestProto request) throws ServiceException {
try {
server.addCachePool(PBHelper.convert(request.getInfo()));
CachePoolInfo info =
new CachePoolInfo(request.getPoolName());
if (request.hasOwnerName()) {
info.setOwnerName(request.getOwnerName());
}
if (request.hasGroupName()) {
info.setGroupName(request.getGroupName());
}
if (request.hasMode()) {
info.setMode(new FsPermission((short)request.getMode()));
}
if (request.hasWeight()) {
info.setWeight(request.getWeight());
}
server.addCachePool(info);
return AddCachePoolResponseProto.newBuilder().build();
} catch (IOException e) {
throw new ServiceException(e);
}
}
@Override
public ModifyCachePoolResponseProto modifyCachePool(RpcController controller,
ModifyCachePoolRequestProto request) throws ServiceException {
try {
server.modifyCachePool(
PBHelper.convert(request.getPool()).getId(),
PBHelper.convert(request.getInfo()));
CachePoolInfo info =
new CachePoolInfo(request.getPoolName());
if (request.hasOwnerName()) {
info.setOwnerName(request.getOwnerName());
}
if (request.hasGroupName()) {
info.setGroupName(request.getGroupName());
}
if (request.hasMode()) {
info.setMode(new FsPermission((short)request.getMode()));
}
if (request.hasWeight()) {
info.setWeight(request.getWeight());
}
server.modifyCachePool(info);
return ModifyCachePoolResponseProto.newBuilder().build();
} catch (IOException e) {
throw new ServiceException(e);
@@ -1158,7 +1184,7 @@ public ModifyCachePoolResponseProto modifyCachePool(RpcController controller,
public RemoveCachePoolResponseProto removeCachePool(RpcController controller,
RemoveCachePoolRequestProto request) throws ServiceException {
try {
server.removeCachePool(PBHelper.convert(request.getPool()).getId());
server.removeCachePool(request.getPoolName());
return RemoveCachePoolResponseProto.newBuilder().build();
} catch (IOException e) {
throw new ServiceException(e);
@@ -1169,16 +1195,27 @@ public RemoveCachePoolResponseProto removeCachePool(RpcController controller,
public ListCachePoolsResponseProto listCachePools(RpcController controller,
ListCachePoolsRequestProto request) throws ServiceException {
try {
RemoteIterator<CachePool> iter =
server.listCachePools(PBHelper.convert(request.getPrevPool()).getId(),
request.getMaxReplies());
RemoteIterator<CachePoolInfo> iter =
server.listCachePools(request.getPrevPoolName());
ListCachePoolsResponseProto.Builder responseBuilder =
ListCachePoolsResponseProto.newBuilder();
while (iter.hasNext()) {
CachePool pool = iter.next();
ListCachePoolsResponseElementProto.Builder elemBuilder =
CachePoolInfo pool = iter.next();
ListCachePoolsResponseElementProto.Builder elemBuilder =
ListCachePoolsResponseElementProto.newBuilder();
elemBuilder.setPool(PBHelper.convert(pool));
elemBuilder.setPoolName(pool.getPoolName());
if (pool.getOwnerName() != null) {
elemBuilder.setOwnerName(pool.getOwnerName());
}
if (pool.getGroupName() != null) {
elemBuilder.setGroupName(pool.getGroupName());
}
if (pool.getMode() != null) {
elemBuilder.setMode(pool.getMode().toShort());
}
if (pool.getWeight() != null) {
elemBuilder.setWeight(pool.getWeight());
}
responseBuilder.addElements(elemBuilder.build());
}
return responseBuilder.build();

org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -23,6 +23,7 @@
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.NoSuchElementException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -37,12 +38,17 @@
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -55,18 +61,14 @@
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto;
@@ -107,23 +109,23 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@@ -144,7 +146,6 @@
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
@@ -1026,7 +1027,7 @@ private static IOException addPathCacheDirectivesError(long code,
return new InvalidPathNameError(directive);
} else if (code == AddPathCacheDirectiveErrorProto.
INVALID_POOL_NAME_ERROR_VALUE) {
return new InvalidPoolError(directive);
return new InvalidPoolNameError(directive);
} else {
return new UnexpectedAddPathCacheDirectiveException(directive);
}
@@ -1041,7 +1042,7 @@ public List<Fallible<PathCacheEntry>> addPathCacheDirectives(
for (PathCacheDirective directive : directives) {
builder.addElements(PathCacheDirectiveProto.newBuilder().
setPath(directive.getPath()).
setPool(PBHelper.convert(new CachePool(directive.getPoolId()))).
setPool(directive.getPool()).
build());
}
AddPathCacheDirectivesResponseProto result =
@@ -1120,40 +1121,45 @@ private static class BatchedPathCacheEntries
@Override
public PathCacheEntry get(int i) {
PathCacheEntryProto entryProto = response.getEntries(i);
return PBHelper.convert(entryProto);
ListPathCacheEntriesElementProto elementProto =
response.getElements(i);
return new PathCacheEntry(elementProto.getId(),
new PathCacheDirective(elementProto.getPath(),
elementProto.getPool()));
}
@Override
public int size() {
return response.getEntriesCount();
return response.getElementsCount();
}
@Override
public boolean hasMore() {
return response.getHasMore();
}
}
private class PathCacheEntriesIterator
extends BatchedRemoteIterator<Long, PathCacheEntry> {
private final long poolId;
private final String pool;
public PathCacheEntriesIterator(long prevKey, int maxRepliesPerRequest,
long poolId) {
super(prevKey, maxRepliesPerRequest);
this.poolId = poolId;
public PathCacheEntriesIterator(long prevKey, String pool) {
super(prevKey);
this.pool = pool;
}
@Override
public BatchedEntries<PathCacheEntry> makeRequest(
Long prevEntryId, int maxRepliesPerRequest) throws IOException {
Long nextKey) throws IOException {
ListPathCacheEntriesResponseProto response;
try {
ListPathCacheEntriesRequestProto req =
ListPathCacheEntriesRequestProto.newBuilder().
setPrevEntry(
PBHelper.convert(new PathCacheEntry(prevEntryId, null))).
setPool(PBHelper.convert(new CachePool(poolId))).
setMaxReplies(maxRepliesPerRequest).
setPrevId(nextKey).
setPool(pool).
build();
response = rpcProxy.listPathCacheEntries(null, req);
if (response.getEntriesCount() == 0) {
if (response.getElementsCount() == 0) {
response = null;
}
} catch (ServiceException e) {
@@ -1170,30 +1176,51 @@ public Long elementToPrevKey(PathCacheEntry element) {
@Override
public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
long poolId, int repliesPerRequest) throws IOException {
return new PathCacheEntriesIterator(prevId, repliesPerRequest, poolId);
String pool) throws IOException {
return new PathCacheEntriesIterator(prevId, pool);
}
@Override
public CachePool addCachePool(CachePoolInfo info) throws IOException {
AddCachePoolRequestProto.Builder builder =
public void addCachePool(CachePoolInfo info) throws IOException {
AddCachePoolRequestProto.Builder builder =
AddCachePoolRequestProto.newBuilder();
builder.setInfo(PBHelper.convert(info));
builder.setPoolName(info.getPoolName());
if (info.getOwnerName() != null) {
builder.setOwnerName(info.getOwnerName());
}
if (info.getGroupName() != null) {
builder.setGroupName(info.getGroupName());
}
if (info.getMode() != null) {
builder.setMode(info.getMode().toShort());
}
if (info.getWeight() != null) {
builder.setWeight(info.getWeight());
}
try {
return PBHelper.convert(
rpcProxy.addCachePool(null, builder.build()).getPool());
rpcProxy.addCachePool(null, builder.build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public void modifyCachePool(long poolId, CachePoolInfo info)
throws IOException {
ModifyCachePoolRequestProto.Builder builder =
ModifyCachePoolRequestProto.newBuilder()
.setPool(PBHelper.convert(new CachePool(poolId)))
.setInfo(PBHelper.convert(info));
public void modifyCachePool(CachePoolInfo req) throws IOException {
ModifyCachePoolRequestProto.Builder builder =
ModifyCachePoolRequestProto.newBuilder();
builder.setPoolName(req.getPoolName());
if (req.getOwnerName() != null) {
builder.setOwnerName(req.getOwnerName());
}
if (req.getGroupName() != null) {
builder.setGroupName(req.getGroupName());
}
if (req.getMode() != null) {
builder.setMode(req.getMode().toShort());
}
if (req.getWeight() != null) {
builder.setWeight(req.getWeight());
}
try {
rpcProxy.modifyCachePool(null, builder.build());
} catch (ServiceException e) {
@@ -1202,69 +1229,74 @@ public void modifyCachePool(long poolId, CachePoolInfo info)
}
@Override
public void removeCachePool(long poolId) throws IOException {
public void removeCachePool(String cachePoolName) throws IOException {
try {
rpcProxy.removeCachePool(null,
rpcProxy.removeCachePool(null,
RemoveCachePoolRequestProto.newBuilder().
setPool(PBHelper.convert(new CachePool(poolId))).
build());
setPoolName(cachePoolName).build());
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
private static class BatchedPathDirectiveEntries
implements BatchedEntries<CachePool> {
implements BatchedEntries<CachePoolInfo> {
private final ListCachePoolsResponseProto proto;
public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) {
this.proto = proto;
}
@Override
public CachePool get(int i) {
public CachePoolInfo get(int i) {
ListCachePoolsResponseElementProto elem = proto.getElements(i);
return PBHelper.convert(elem.getPool());
return new CachePoolInfo(elem.getPoolName()).
setOwnerName(elem.getOwnerName()).
setGroupName(elem.getGroupName()).
setMode(new FsPermission((short)elem.getMode())).
setWeight(elem.getWeight());
}
@Override
public int size() {
return proto.getElementsCount();
}
@Override
public boolean hasMore() {
return proto.getHasMore();
}
}
private class CachePoolIterator
extends BatchedRemoteIterator<String, CachePoolInfo> {
private class CachePoolIterator
extends BatchedRemoteIterator<Long, CachePool> {
public CachePoolIterator(Long prevKey, int maxRepliesPerRequest) {
super(prevKey, maxRepliesPerRequest);
public CachePoolIterator(String prevKey) {
super(prevKey);
}
@Override
public BatchedEntries<CachePool> makeRequest(Long prevKey,
int maxRepliesPerRequest) throws IOException {
public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
throws IOException {
try {
return new BatchedPathDirectiveEntries(
rpcProxy.listCachePools(null,
rpcProxy.listCachePools(null,
ListCachePoolsRequestProto.newBuilder().
setPrevPool(PBHelper.convert(new CachePool(prevKey))).
setMaxReplies(maxRepliesPerRequest).
build()));
setPrevPoolName(prevKey).build()));
} catch (ServiceException e) {
throw ProtobufHelper.getRemoteException(e);
}
}
@Override
public Long elementToPrevKey(CachePool element) {
return element.getId();
public String elementToPrevKey(CachePoolInfo element) {
return element.getPoolName();
}
}
@Override
public RemoteIterator<CachePool> listCachePools(long prevPoolId,
int maxRepliesPerRequest) throws IOException {
return new CachePoolIterator(prevPoolId, maxRepliesPerRequest);
public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
throws IOException {
return new CachePoolIterator(prevKey);
}
}

org/apache/hadoop/hdfs/protocolPB/PBHelper.java

@@ -32,13 +32,10 @@
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -53,15 +50,9 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
@@ -123,7 +114,6 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
@@ -1503,74 +1493,6 @@ public static HdfsProtos.ChecksumTypeProto convert(DataChecksum.Type type) {
return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
}
public static PathCacheDirective convert(
PathCacheDirectiveProto directiveProto) {
CachePool pool = convert(directiveProto.getPool());
return new PathCacheDirective(directiveProto.getPath(), pool.getId());
}
public static PathCacheDirectiveProto convert(PathCacheDirective directive) {
PathCacheDirectiveProto.Builder builder =
PathCacheDirectiveProto.newBuilder()
.setPath(directive.getPath())
.setPool(PBHelper.convert(new CachePool(directive.getPoolId())));
return builder.build();
}
public static PathCacheEntry convert(PathCacheEntryProto entryProto) {
long entryId = entryProto.getId();
PathCacheDirective directive = convert(entryProto.getDirective());
return new PathCacheEntry(entryId, directive);
}
public static PathCacheEntryProto convert(PathCacheEntry entry) {
PathCacheEntryProto.Builder builder = PathCacheEntryProto.newBuilder()
.setId(entry.getEntryId())
.setDirective(PBHelper.convert(entry.getDirective()));
return builder.build();
}
public static CachePoolInfo convert(CachePoolInfoProto infoProto) {
CachePoolInfo.Builder builder =
CachePoolInfo.newBuilder().setPoolName(infoProto.getPoolName());
if (infoProto.hasOwnerName()) {
builder.setOwnerName(infoProto.getOwnerName());
}
if (infoProto.hasGroupName()) {
builder.setGroupName(infoProto.getGroupName());
}
if (infoProto.hasMode()) {
builder.setMode(new FsPermission((short) infoProto.getMode()));
}
if (infoProto.hasWeight()) {
builder.setWeight(infoProto.getWeight());
}
return builder.build();
}
public static CachePoolInfoProto convert(CachePoolInfo info) {
CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder()
.setPoolName(info.getPoolName())
.setOwnerName(info.getOwnerName())
.setGroupName(info.getGroupName())
.setMode(info.getMode().toShort())
.setWeight(info.getWeight());
return builder.build();
}
public static CachePool convert(CachePoolProto poolProto) {
CachePoolInfo info = convert(poolProto.getInfo());
CachePool pool = new CachePool(poolProto.getId(), info);
return pool;
}
public static CachePoolProto convert(CachePool pool) {
CachePoolProto.Builder builder = CachePoolProto.newBuilder()
.setId(pool.getId())
.setInfo(convert(pool.getInfo()));
return builder.build();
}
public static InputStream vintPrefixed(final InputStream input)
throws IOException {
final int firstByte = input.read();

org/apache/hadoop/hdfs/server/namenode/CacheManager.java

@@ -17,28 +17,34 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.TreeMap;
import java.util.Map.Entry;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
import org.apache.hadoop.util.Fallible;
/**
@@ -65,62 +71,58 @@ final class CacheManager {
/**
* Cache pools, sorted by name.
*/
private final TreeMap<String, CachePool> cachePoolsByName =
private final TreeMap<String, CachePool> cachePools =
new TreeMap<String, CachePool>();
/**
* Cache pools, sorted by ID
*/
private final TreeMap<Long, CachePool> cachePoolsById =
new TreeMap<Long, CachePool>();
/**
* The entry ID to use for a new entry.
*/
private long nextEntryId;
/**
* The pool ID to use for a new pool.
* Maximum number of cache pools to list in one operation.
*/
private long nextPoolId;
private final int maxListCachePoolsResponses;
/**
* Maximum number of cache pool directives to list in one operation.
*/
private final int maxListCacheDirectivesResponses;
CacheManager(FSDirectory dir, Configuration conf) {
// TODO: support loading and storing of the CacheManager state
clear();
maxListCachePoolsResponses = conf.getInt(
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES,
DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES_DEFAULT);
maxListCacheDirectivesResponses = conf.getInt(
DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES,
DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES_DEFAULT);
}
synchronized void clear() {
entriesById.clear();
entriesByDirective.clear();
cachePoolsByName.clear();
cachePoolsById.clear();
cachePools.clear();
nextEntryId = 1;
nextPoolId = 1;
}
synchronized long getNextEntryId() throws IOException {
if (nextEntryId == Long.MAX_VALUE) {
throw new IOException("no more available entry IDs");
throw new IOException("no more available IDs");
}
return nextEntryId++;
}
synchronized long getNextPoolId() throws IOException {
if (nextPoolId == Long.MAX_VALUE) {
throw new IOException("no more available pool IDs");
}
return nextPoolId++;
}
private synchronized Fallible<PathCacheEntry> addDirective(
FSPermissionChecker pc, PathCacheDirective directive) {
CachePool pool = cachePoolsById.get(directive.getPoolId());
PathCacheDirective directive, FSPermissionChecker pc) {
CachePool pool = cachePools.get(directive.getPool());
if (pool == null) {
LOG.info("addDirective " + directive + ": pool not found.");
return new Fallible<PathCacheEntry>(
new InvalidPoolError(directive));
new InvalidPoolNameError(directive));
}
if (!pc.checkPermission(pool, FsAction.WRITE)) {
if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) {
LOG.info("addDirective " + directive + ": write permission denied.");
return new Fallible<PathCacheEntry>(
new PoolWritePermissionDeniedError(directive));
@@ -155,17 +157,17 @@ private synchronized Fallible<PathCacheEntry> addDirective(
}
public synchronized List<Fallible<PathCacheEntry>> addDirectives(
FSPermissionChecker pc, List<PathCacheDirective> directives) {
List<PathCacheDirective> directives, FSPermissionChecker pc) {
ArrayList<Fallible<PathCacheEntry>> results =
new ArrayList<Fallible<PathCacheEntry>>(directives.size());
for (PathCacheDirective directive: directives) {
results.add(addDirective(pc, directive));
results.add(addDirective(directive, pc));
}
return results;
}
private synchronized Fallible<Long> removeEntry(FSPermissionChecker pc,
long entryId) {
private synchronized Fallible<Long> removeEntry(long entryId,
FSPermissionChecker pc) {
// Check for invalid IDs.
if (entryId <= 0) {
LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID.");
@@ -177,20 +179,20 @@ private synchronized Fallible<Long> removeEntry(FSPermissionChecker pc,
LOG.info("removeEntry " + entryId + ": entry not found.");
return new Fallible<Long>(new NoSuchIdException(entryId));
}
CachePool pool = cachePoolsById.get(existing.getDirective().getPoolId());
CachePool pool = cachePools.get(existing.getDirective().getPool());
if (pool == null) {
LOG.info("removeEntry " + entryId + ": pool not found for directive " +
existing.getDirective());
return new Fallible<Long>(
new UnexpectedRemovePathCacheEntryException(entryId));
}
if (!pc.checkPermission(pool, FsAction.WRITE)) {
if ((pc != null) && (!pc.checkPermission(pool, FsAction.WRITE))) {
LOG.info("removeEntry " + entryId + ": write permission denied to " +
"pool " + pool + " for entry " + existing);
return new Fallible<Long>(
new RemovePermissionDeniedException(entryId));
}
// Remove the corresponding entry in entriesByDirective.
if (entriesByDirective.remove(existing.getDirective()) == null) {
LOG.warn("removeEntry " + entryId + ": failed to find existing entry " +
@@ -202,41 +204,43 @@ private synchronized Fallible<Long> removeEntry(FSPermissionChecker pc,
return new Fallible<Long>(entryId);
}
public synchronized List<Fallible<Long>> removeEntries(FSPermissionChecker pc,
List<Long> entryIds) {
public synchronized List<Fallible<Long>> removeEntries(List<Long> entryIds,
FSPermissionChecker pc) {
ArrayList<Fallible<Long>> results =
new ArrayList<Fallible<Long>>(entryIds.size());
for (Long entryId : entryIds) {
results.add(removeEntry(pc, entryId));
results.add(removeEntry(entryId, pc));
}
return results;
}
public synchronized List<PathCacheEntry> listPathCacheEntries(
FSPermissionChecker pc, long prevId, Long poolId, int maxReplies) {
final int MAX_PRE_ALLOCATED_ENTRIES = 16;
ArrayList<PathCacheEntry> replies = new ArrayList<PathCacheEntry>(
Math.min(MAX_PRE_ALLOCATED_ENTRIES, maxReplies));
public synchronized BatchedListEntries<PathCacheEntry>
listPathCacheEntries(long prevId, String filterPool, FSPermissionChecker pc) {
final int NUM_PRE_ALLOCATED_ENTRIES = 16;
ArrayList<PathCacheEntry> replies =
new ArrayList<PathCacheEntry>(NUM_PRE_ALLOCATED_ENTRIES);
int numReplies = 0;
SortedMap<Long, PathCacheEntry> tailMap = entriesById.tailMap(prevId + 1);
for (PathCacheEntry entry : tailMap.values()) {
if (numReplies >= maxReplies) {
return replies;
for (Entry<Long, PathCacheEntry> cur : tailMap.entrySet()) {
if (numReplies >= maxListCacheDirectivesResponses) {
return new BatchedListEntries<PathCacheEntry>(replies, true);
}
long entryPoolId = entry.getDirective().getPoolId();
if (poolId == null || poolId <= 0 || entryPoolId == poolId) {
if (pc.checkPermission(
cachePoolsById.get(entryPoolId), FsAction.EXECUTE)) {
replies.add(entry);
numReplies++;
}
PathCacheEntry curEntry = cur.getValue();
if (!filterPool.isEmpty() &&
!cur.getValue().getDirective().getPool().equals(filterPool)) {
continue;
}
CachePool pool = cachePools.get(curEntry.getDirective().getPool());
if (pool == null) {
LOG.error("invalid pool for PathCacheEntry " + curEntry);
continue;
}
if (pc.checkPermission(pool, FsAction.EXECUTE)) {
replies.add(cur.getValue());
numReplies++;
}
}
return replies;
}
synchronized CachePool getCachePool(long id) {
return cachePoolsById.get(id);
return new BatchedListEntries<PathCacheEntry>(replies, false);
}
/**
@@ -246,24 +250,22 @@ synchronized CachePool getCachePool(long id) {
*
* @param info
* The info for the cache pool to create.
* @return created CachePool
*/
public synchronized CachePool addCachePool(CachePoolInfo info)
public synchronized void addCachePool(CachePoolInfo info)
throws IOException {
String poolName = info.getPoolName();
if (poolName == null || poolName.isEmpty()) {
if (poolName.isEmpty()) {
throw new IOException("invalid empty cache pool name");
}
if (cachePoolsByName.containsKey(poolName)) {
CachePool pool = cachePools.get(poolName);
if (pool != null) {
throw new IOException("cache pool " + poolName + " already exists.");
}
CachePool cachePool = new CachePool(getNextPoolId(), poolName,
CachePool cachePool = new CachePool(poolName,
info.getOwnerName(), info.getGroupName(), info.getMode(),
info.getWeight());
cachePoolsById.put(cachePool.getId(), cachePool);
cachePoolsByName.put(poolName, cachePool);
cachePools.put(poolName, cachePool);
LOG.info("created new cache pool " + cachePool);
return cachePool;
}
/**
@@ -274,62 +276,46 @@ public synchronized CachePool addCachePool(CachePoolInfo info)
* @param info
* The info for the cache pool to modify.
*/
public synchronized void modifyCachePool(long poolId, CachePoolInfo info)
public synchronized void modifyCachePool(CachePoolInfo info)
throws IOException {
if (poolId <= 0) {
throw new IOException("invalid pool id " + poolId);
String poolName = info.getPoolName();
if (poolName.isEmpty()) {
throw new IOException("invalid empty cache pool name");
}
if (!cachePoolsById.containsKey(poolId)) {
throw new IOException("cache pool id " + poolId + " does not exist.");
CachePool pool = cachePools.get(poolName);
if (pool == null) {
throw new IOException("cache pool " + poolName + " does not exist.");
}
CachePool pool = cachePoolsById.get(poolId);
// Remove the old CachePoolInfo
removeCachePool(poolId);
// Build up the new CachePoolInfo
CachePoolInfo.Builder newInfo = CachePoolInfo.newBuilder(pool.getInfo());
StringBuilder bld = new StringBuilder();
String prefix = "";
if (info.getPoolName() != null) {
newInfo.setPoolName(info.getPoolName());
bld.append(prefix).
append("set name to ").append(info.getOwnerName());
prefix = "; ";
}
if (info.getOwnerName() != null) {
newInfo.setOwnerName(info.getOwnerName());
pool.setOwnerName(info.getOwnerName());
bld.append(prefix).
append("set owner to ").append(info.getOwnerName());
prefix = "; ";
}
if (info.getGroupName() != null) {
newInfo.setGroupName(info.getGroupName());
pool.setGroupName(info.getGroupName());
bld.append(prefix).
append("set group to ").append(info.getGroupName());
prefix = "; ";
}
if (info.getMode() != null) {
newInfo.setMode(info.getMode());
pool.setMode(info.getMode());
bld.append(prefix).
append(String.format("set mode to ", info.getMode()));
append(String.format("set mode to 0%3o", info.getMode()));
prefix = "; ";
}
if (info.getWeight() != null) {
newInfo.setWeight(info.getWeight());
pool.setWeight(info.getWeight());
bld.append(prefix).
append("set weight to ").append(info.getWeight());
prefix = "; ";
}
if (prefix.isEmpty()) {
bld.append("no changes.");
} else {
pool.setInfo(newInfo.build());
}
// Put the newly modified info back in
cachePoolsById.put(poolId, pool);
cachePoolsByName.put(info.getPoolName(), pool);
LOG.info("modified pool id " + pool.getId()
+ " (" + pool.getInfo().getPoolName() + "); "
+ bld.toString());
LOG.info("modified " + poolName + "; " + bld.toString());
}
/**
@@ -337,39 +323,47 @@ public synchronized void modifyCachePool(long poolId, CachePoolInfo info)
*
* Only the superuser should be able to call this function.
*
* @param poolId
* The id of the cache pool to remove.
* @param poolName
* The name for the cache pool to remove.
*/
public synchronized void removeCachePool(String poolName)
    throws IOException {
  CachePool pool = cachePools.remove(poolName);
  if (pool == null) {
    throw new IOException("can't remove nonexistent cache pool " + poolName);
  }
  // Remove all entries that use this pool.
  // TODO: could optimize this somewhat to avoid the need to iterate
  // over all entries in entriesByDirective
  Iterator<Entry<PathCacheDirective, PathCacheEntry>> iter =
      entriesByDirective.entrySet().iterator();
  while (iter.hasNext()) {
    Entry<PathCacheDirective, PathCacheEntry> entry = iter.next();
    if (entry.getKey().getPool().equals(poolName)) {
      entriesById.remove(entry.getValue().getEntryId());
      iter.remove();
    }
  }
}
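// Removing a pool cascades to its directives, and the walk over
// entriesByDirective must delete through Iterator.remove(); any other
// mutation mid-loop would throw ConcurrentModificationException. A minimal
// sketch of the same cascade delete over plain maps (types and names here
// are illustrative, not the HDFS ones):
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

public class CascadeDelete {
  public static void main(String[] args) {
    Map<String, String> directiveToPool = new HashMap<>();  // path -> pool name
    Map<Long, String> entriesById = new HashMap<>();        // entry id -> path
    directiveToPool.put("/alpha", "pool1");
    directiveToPool.put("/beta", "pool2");
    entriesById.put(1L, "/alpha");
    entriesById.put(2L, "/beta");

    String poolName = "pool1";
    Iterator<Map.Entry<String, String>> iter =
        directiveToPool.entrySet().iterator();
    while (iter.hasNext()) {
      Map.Entry<String, String> entry = iter.next();
      if (entry.getValue().equals(poolName)) {
        // drop the secondary index entry, then the primary one via the iterator
        entriesById.values().remove(entry.getKey());
        iter.remove();  // safe: removal goes through the iterator itself
      }
    }
    System.out.println(directiveToPool);  // {/beta=pool2}
    System.out.println(entriesById);      // {2=/beta}
  }
}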
public synchronized BatchedListEntries<CachePoolInfo>
    listCachePools(FSPermissionChecker pc, String prevKey) {
  final int NUM_PRE_ALLOCATED_ENTRIES = 16;
  ArrayList<CachePoolInfo> results =
      new ArrayList<CachePoolInfo>(NUM_PRE_ALLOCATED_ENTRIES);
  SortedMap<String, CachePool> tailMap = cachePools.tailMap(prevKey, false);
  int numListed = 0;
  for (Entry<String, CachePool> cur : tailMap.entrySet()) {
    if (numListed++ >= maxListCachePoolsResponses) {
      return new BatchedListEntries<CachePoolInfo>(results, true);
    }
    if (pc == null) {
      results.add(cur.getValue().getInfo(true));
    } else {
      results.add(cur.getValue().getInfo(pc));
    }
  }
return new BatchedListEntries<CachePoolInfo>(results, false);
}
}
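// The listing above is one page of a batched protocol: at most
// maxListCachePoolsResponses entries strictly after prevKey, plus a hasMore
// flag, and the client calls again with the last name it received. A
// self-contained sketch of that contract over a TreeMap (the cap of 2 and
// the pool names are made up for illustration):
import java.util.ArrayList;
import java.util.List;
import java.util.SortedMap;
import java.util.TreeMap;

public class BatchedListing {
  static final int MAX_RESPONSES = 2;  // stands in for maxListCachePoolsResponses

  // One simulated RPC: names strictly after prevKey, capped at MAX_RESPONSES.
  static List<String> listAfter(TreeMap<String, String> pools, String prevKey,
      boolean[] hasMore) {
    List<String> results = new ArrayList<>();
    SortedMap<String, String> tail = pools.tailMap(prevKey, false);
    for (String name : tail.keySet()) {
      if (results.size() >= MAX_RESPONSES) {
        hasMore[0] = true;  // more pools may remain after this batch
        return results;
      }
      results.add(name);
    }
    hasMore[0] = false;
    return results;
  }

  public static void main(String[] args) {
    TreeMap<String, String> pools = new TreeMap<>();
    for (String p : new String[] {"pool1", "pool2", "pool3"}) {
      pools.put(p, p);
    }
    String prevKey = "";  // the empty string sorts before every pool name
    boolean[] hasMore = {true};
    while (hasMore[0]) {  // the client-side paging loop
      List<String> batch = listAfter(pools, prevKey, hasMore);
      if (batch.isEmpty()) {
        break;
      }
      System.out.println(batch);  // [pool1, pool2] then [pool3]
      prevKey = batch.get(batch.size() - 1);  // resume after the last key seen
    }
  }
}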


@ -19,119 +19,137 @@
import java.io.IOException;
import javax.annotation.Nonnull;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.security.UserGroupInformation;
/**
* A CachePool describes a set of cache resources being managed by the NameNode.
* User caching requests are billed to the cache pool specified in the request.
*
* This is an internal class, only used on the NameNode. For identifying or
* describing a cache pool to clients, please use CachePoolInfo.
*/
@InterfaceAudience.Private
public final class CachePool {
public static final Log LOG = LogFactory.getLog(CachePool.class);
@Nonnull
private final String poolName;

@Nonnull
private String ownerName;

@Nonnull
private String groupName;

@Nonnull
private FsPermission mode;

private int weight;
public CachePool(String poolName, String ownerName, String groupName,
FsPermission mode, Integer weight) throws IOException {
this.poolName = poolName;
UserGroupInformation ugi = null;
if (ownerName == null) {
  ugi = NameNode.getRemoteUser();
  this.ownerName = ugi.getShortUserName();
} else {
  this.ownerName = ownerName;
}
if (groupName == null) {
  if (ugi == null) {
    ugi = NameNode.getRemoteUser();
  }
  this.groupName = ugi.getPrimaryGroupName();
} else {
  this.groupName = groupName;
}
this.mode = mode != null ?
    new FsPermission(mode) : FsPermission.getCachePoolDefault();
this.weight = weight != null ? weight : 100;
}
public String getName() {
return poolName;
}
public String getOwnerName() {
return ownerName;
}
public CachePool setOwnerName(String ownerName) {
this.ownerName = ownerName;
return this;
}
public String getGroupName() {
return groupName;
}
public CachePool setGroupName(String groupName) {
this.groupName = groupName;
return this;
}
public FsPermission getMode() {
return mode;
}
public CachePool setMode(FsPermission mode) {
this.mode = new FsPermission(mode);
return this;
}
public int getWeight() {
return weight;
}
public CachePool setWeight(int weight) {
this.weight = weight;
return this;
}
/**
* Get information about this cache pool.
*
* @param fullInfo
*          If true, return all the fields of this pool.  If false, return
*          only the pool name (i.e., what a caller without read permission
*          on the pool would see.)
* @return
* Cache pool information.
*/
public CachePoolInfo getInfo(boolean fullInfo) {
CachePoolInfo info = new CachePoolInfo(poolName);
if (!fullInfo) {
return info;
}
return info.setOwnerName(ownerName).
setGroupName(groupName).
setMode(new FsPermission(mode)).
setWeight(weight);
}
public CachePoolInfo getInfo(FSPermissionChecker pc) {
return getInfo(pc.checkPermission(this, FsAction.READ));
}
public String toString() {
return new StringBuilder().
append("{ ").append("id:").append(id).
append(", info:").append(info.toString()).
append("{ ").append("poolName:").append(poolName).
append(", ownerName:").append(ownerName).
append(", groupName:").append(groupName).
append(", mode:").append(mode).
append(", weight:").append(weight).
append(" }").toString();
}
}
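// getInfo(FSPermissionChecker) returns a permission-dependent view: READ
// access on the pool yields every attribute, anything less yields only the
// pool name. A hedged sketch of that partial-view pattern (PoolView is a
// hypothetical stand-in for CachePoolInfo; records require Java 16+):
import java.util.Optional;

public class PartialView {
  record PoolView(String name, Optional<String> owner, Optional<Integer> weight) {}

  static PoolView viewOf(String name, String owner, int weight,
      boolean canRead) {
    if (!canRead) {
      // name-only view, mirroring getInfo(false)
      return new PoolView(name, Optional.empty(), Optional.empty());
    }
    return new PoolView(name, Optional.of(owner), Optional.of(weight));
  }

  public static void main(String[] args) {
    System.out.println(viewOf("pool1", "bob", 150, true));
    System.out.println(viewOf("pool1", "bob", 150, false));
  }
}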


@ -119,6 +119,7 @@
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@ -6756,9 +6757,11 @@ List<Fallible<PathCacheEntry>> addPathCacheDirectives(
if (retryCacheEntry != null && retryCacheEntry.isSuccess()) {
return (List<Fallible<PathCacheEntry>>) retryCacheEntry.getPayload();
}
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
boolean success = false;
List<Fallible<PathCacheEntry>> results = null;
checkOperation(OperationCategory.WRITE);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
@ -6766,7 +6769,7 @@ List<Fallible<PathCacheEntry>> addPathCacheDirectives(
throw new SafeModeException(
"Cannot add path cache directive", safeMode);
}
results = cacheManager.addDirectives(directives, pc);
//getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119
success = true;
} finally {
@ -6774,7 +6777,7 @@ List<Fallible<PathCacheEntry>> addPathCacheDirectives(
if (success) {
getEditLog().logSync();
}
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addPathCacheDirectives", null, null, null);
}
RetryCache.setState(retryCacheEntry, success, results);
@ -6783,147 +6786,175 @@ List<Fallible<PathCacheEntry>> addPathCacheDirectives(
}
@SuppressWarnings("unchecked")
List<Fallible<Long>> removePathCacheEntries(List<Long> ids) throws IOException {
CacheEntryWithPayload retryCacheEntry =
RetryCache.waitForCompletion(retryCache, null);
if (retryCacheEntry != null && retryCacheEntry.isSuccess()) {
return (List<Fallible<Long>>) retryCacheEntry.getPayload();
}
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
boolean success = false;
List<Fallible<Long>> results = null;
checkOperation(OperationCategory.WRITE);
writeLock();
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot add path cache directive", safeMode);
"Cannot remove path cache directives", safeMode);
}
results = cacheManager.removeEntries(ids, pc);
//getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119
success = true;
} finally {
writeUnlock();
if (success) {
getEditLog().logSync();
}
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "removePathCacheEntries", null, null, null);
}
RetryCache.setState(retryCacheEntry, success, results);
}
return results;
}
BatchedListEntries<PathCacheEntry> listPathCacheEntries(long startId,
String pool) throws IOException {
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
BatchedListEntries<PathCacheEntry> results;
checkOperation(OperationCategory.READ);
readLock();
boolean success = false;
try {
checkOperation(OperationCategory.READ);
results = cacheManager.listPathCacheEntries(startId, pool, pc);
success = true;
} finally {
readUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "listPathCacheEntries", null, null, null);
}
}
return results;
}
public void addCachePool(CachePoolInfo req) throws IOException {
final FSPermissionChecker pc = isPermissionEnabled ?
getPermissionChecker() : null;
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
checkOperation(OperationCategory.WRITE);
writeLock();
boolean success = false;
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot modify cache pool " + info.getPoolName(), safeMode);
"Cannot add cache pool " + req.getPoolName(), safeMode);
}
if (pc != null) {
pc.checkSuperuserPrivilege();
}
cacheManager.addCachePool(req);
//getEditLog().logAddCachePool(req); // FIXME: HDFS-5119
success = true;
} finally {
writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addCachePool", req.getPoolName(), null, null);
}
RetryCache.setState(cacheEntry, success);
}
getEditLog().logSync();
}
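// All three pool mutations share this retry-cache discipline: consult the
// cache first so a retried RPC (after a lost acknowledgment) replays the
// recorded outcome instead of re-executing, and record the outcome in a
// finally block. A simplified sketch of that idempotency shape (this is not
// the Hadoop RetryCache API, just the pattern):
import java.util.HashMap;
import java.util.Map;

public class RetryCacheSketch {
  private final Map<Long, Boolean> cache = new HashMap<>();  // callId -> outcome

  boolean addPool(long callId, Runnable mutation) {
    Boolean prev = cache.get(callId);
    if (prev != null) {
      return prev;  // retried call: replay the first outcome, do not re-execute
    }
    boolean success = false;
    try {
      mutation.run();
      success = true;
    } finally {
      cache.put(callId, success);  // record the outcome even on failure
    }
    return success;
  }

  public static void main(String[] args) {
    RetryCacheSketch ns = new RetryCacheSketch();
    long callId = 42L;
    System.out.println(ns.addPool(callId, () -> System.out.println("created pool1")));
    // A network-level retry with the same call id is answered from the cache:
    System.out.println(ns.addPool(callId, () -> System.out.println("never runs")));
  }
}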
public void modifyCachePool(CachePoolInfo req) throws IOException {
final FSPermissionChecker pc =
isPermissionEnabled ? getPermissionChecker() : null;
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
checkOperation(OperationCategory.WRITE);
writeLock();
boolean success = false;
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot modify cache pool " + req.getPoolName(), safeMode);
}
if (pc != null) {
pc.checkSuperuserPrivilege();
}
cacheManager.modifyCachePool(req);
//getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119
success = true;
} finally {
writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null);
}
RetryCache.setState(cacheEntry, success);
}
getEditLog().logSync();
}
public void removeCachePool(String cachePoolName) throws IOException {
final FSPermissionChecker pc =
isPermissionEnabled ? getPermissionChecker() : null;
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
checkOperation(OperationCategory.WRITE);
writeLock();
boolean success = false;
try {
checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) {
throw new SafeModeException(
"Cannot remove cache pool " + identifier, safeMode);
"Cannot remove cache pool " + cachePoolName, safeMode);
}
if (pc != null) {
pc.checkSuperuserPrivilege();
}
cacheManager.removeCachePool(cachePoolName);
//getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119
success = true;
} finally {
writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "removeCachePool", cachePoolName, null, null);
}
RetryCache.setState(cacheEntry, success);
}
getEditLog().logSync();
}
public BatchedListEntries<CachePoolInfo> listCachePools(String prevKey)
throws IOException {
final FSPermissionChecker pc =
isPermissionEnabled ? getPermissionChecker() : null;
BatchedListEntries<CachePoolInfo> results;
checkOperation(OperationCategory.READ);
boolean success = false;
readLock();
try {
checkOperation(OperationCategory.READ);
results = cacheManager.listCachePools(pc, prevKey);
success = true;
} finally {
readUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "listCachePools", null, null, null);
}
}
return results;
}
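// Reads like the listing above take the namesystem read lock and audit in
// the finally block, so even a failed listing leaves an audit record with
// success=false. A minimal sketch of that lock-then-audit shape with a
// standard ReentrantReadWriteLock (the println stands in for logAuditEvent):
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class AuditedRead {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final List<String> pools = Arrays.asList("pool1", "pool2");

  List<String> listPools() {
    boolean success = false;
    lock.readLock().lock();
    try {
      List<String> results = pools;  // the read that the lock guards
      success = true;
      return results;
    } finally {
      lock.readLock().unlock();
      // audited exactly once, whether or not the read threw
      System.out.println("audit: listPools success=" + success);
    }
  }

  public static void main(String[] args) {
    System.out.println(new AuditedRead().listPools());
  }
}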


@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
@ -28,7 +29,6 @@
import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@ -264,16 +264,15 @@ private void checkStickyBit(INode parent, INode inode, Snapshot snapshot
* @return if the pool can be accessed
*/
public boolean checkPermission(CachePool pool, FsAction access) {
FsPermission mode = pool.getMode();
if (isSuperUser()) {
return true;
}
if (user.equals(pool.getOwnerName())
&& mode.getUserAction().implies(access)) {
return true;
}
if (groups.contains(pool.getGroupName())
&& mode.getGroupAction().implies(access)) {
return true;
}
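// The check above walks the classic POSIX order: superuser short-circuits,
// then the owner bits, then the group bits (the full method falls through to
// the "other" bits). A self-contained sketch of that evaluation over a plain
// octal mode (FsPermission and FsAction replaced by bit masks):
public class ModeCheck {
  static final int READ = 4, WRITE = 2, EXEC = 1;

  static boolean implies(int bits, int access) {
    return (bits & access) == access;
  }

  // mode is an octal triple such as 0750: owner, group, other.
  static boolean check(String user, String[] groups, String owner,
      String group, int mode, int access, boolean superUser) {
    if (superUser) {
      return true;
    }
    if (user.equals(owner) && implies((mode >> 6) & 7, access)) {
      return true;
    }
    for (String g : groups) {
      if (g.equals(group) && implies((mode >> 3) & 7, access)) {
        return true;
      }
    }
    return implies(mode & 7, access);  // the "other" bits
  }

  public static void main(String[] args) {
    // bob owns the pool, so 0750 grants READ even though "other" has no bits
    System.out.println(check("bob", new String[] {"staff"}, "bob", "eng",
        0750, READ, false));  // true
    System.out.println(check("eve", new String[] {"guests"}, "bob", "eng",
        0750, READ, false));  // false
  }
}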


@ -31,11 +31,13 @@
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.NoSuchElementException;
import org.apache.commons.logging.Log;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag;
@ -60,9 +62,9 @@
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -1223,20 +1225,17 @@ public List<Fallible<Long>> removePathCacheEntries(List<Long> ids)
private class ServerSidePathCacheEntriesIterator
extends BatchedRemoteIterator<Long, PathCacheEntry> {
private final String pool;
public ServerSidePathCacheEntriesIterator(Long firstKey, String pool) {
super(firstKey);
this.pool = pool;
}
@Override
public BatchedEntries<PathCacheEntry> makeRequest(
Long nextKey) throws IOException {
return namesystem.listPathCacheEntries(nextKey, pool);
}
@Override
@ -1244,52 +1243,50 @@ public Long elementToPrevKey(PathCacheEntry entry) {
return entry.getEntryId();
}
}
@Override
public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
String pool) throws IOException {
return new ServerSidePathCacheEntriesIterator(prevId, pool);
}
@Override
public void addCachePool(CachePoolInfo info) throws IOException {
namesystem.addCachePool(info);
}
@Override
public void modifyCachePool(CachePoolInfo info) throws IOException {
namesystem.modifyCachePool(info);
}
@Override
public void removeCachePool(String cachePoolName) throws IOException {
namesystem.removeCachePool(cachePoolName);
}
private class ServerSideCachePoolIterator
extends BatchedRemoteIterator<String, CachePoolInfo> {
public ServerSideCachePoolIterator(String prevKey) {
super(prevKey);
}
@Override
public BatchedEntries<CachePoolInfo> makeRequest(String prevKey)
throws IOException {
return namesystem.listCachePools(prevKey);
}
@Override
public String elementToPrevKey(CachePoolInfo element) {
return element.getPoolName();
}
}
@Override
public RemoteIterator<CachePoolInfo> listCachePools(String prevKey)
throws IOException {
return new ServerSideCachePoolIterator(prevKey);
}
}
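// Both iterators above are two-method subclasses: makeRequest fetches one
// batch after prevKey, elementToPrevKey says where to resume, and the base
// class does the buffering. A compact sketch with a toy base class in place
// of org.apache.hadoop.fs.BatchedRemoteIterator (simplified: no hasMore
// flag, an empty batch means the end):
import java.util.Arrays;
import java.util.Iterator;
import java.util.List;
import java.util.NoSuchElementException;
import java.util.stream.Collectors;

public class BatchedIteratorSketch {
  abstract static class Batched<K, E> implements Iterator<E> {
    private K prevKey;
    private List<E> batch;
    private int idx;

    Batched(K prevKey) { this.prevKey = prevKey; }

    abstract List<E> makeRequest(K prevKey);  // one simulated RPC
    abstract K elementToPrevKey(E element);   // the resume point

    public boolean hasNext() {
      if (batch == null || idx >= batch.size()) {
        batch = makeRequest(prevKey);  // refill from the "server"
        idx = 0;
        if (!batch.isEmpty()) {
          prevKey = elementToPrevKey(batch.get(batch.size() - 1));
        }
      }
      return idx < batch.size();
    }

    public E next() {
      if (!hasNext()) {
        throw new NoSuchElementException();
      }
      return batch.get(idx++);
    }
  }

  public static void main(String[] args) {
    final List<String> pools = Arrays.asList("pool1", "pool2", "pool3");
    Batched<String, String> iter = new Batched<String, String>("") {
      List<String> makeRequest(String prevKey) {
        // at most two names strictly greater than prevKey, like a list RPC
        return pools.stream().filter(p -> p.compareTo(prevKey) > 0)
            .limit(2).collect(Collectors.toList());
      }
      String elementToPrevKey(String e) { return e; }
    };
    while (iter.hasNext()) {
      System.out.println(iter.next());
    }
  }
}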


@ -363,27 +363,9 @@ message IsFileClosedResponseProto {
required bool result = 1;
}
message PathCacheDirectiveProto {
  required string path = 1;
  required string pool = 2;
}

message PathCacheEntryProto {
  required int64 id = 1;
  optional PathCacheDirectiveProto directive = 2;
}
message AddPathCacheDirectivesRequestProto {
@ -417,42 +399,52 @@ enum RemovePathCacheEntryErrorProto {
}
message ListPathCacheEntriesRequestProto {
required int64 prevId = 1;
required string pool = 2;
}
message ListPathCacheEntriesElementProto {
required int64 id = 1;
required string path = 2;
required string pool = 3;
}
message ListPathCacheEntriesResponseProto {
repeated ListPathCacheEntriesElementProto elements = 1;
required bool hasMore = 2;
}
message AddCachePoolRequestProto {
required string poolName = 1;
optional string ownerName = 2;
optional string groupName = 3;
optional int32 mode = 4;
optional int32 weight = 5;
}
message AddCachePoolResponseProto { // void response
}
message ModifyCachePoolRequestProto {
required string poolName = 1;
optional string ownerName = 2;
optional string groupName = 3;
optional int32 mode = 4;
optional int32 weight = 5;
}
message ModifyCachePoolResponseProto { // void response
}
message RemoveCachePoolRequestProto {
required string poolName = 1;
}
message RemoveCachePoolResponseProto { // void response
}
message ListCachePoolsRequestProto {
required string prevPoolName = 1;
}
message ListCachePoolsResponseProto {
@ -461,7 +453,11 @@ message ListCachePoolsResponseProto {
}
message ListCachePoolsResponseElementProto {
required string poolName = 1;
required string ownerName = 2;
required string groupName = 3;
required int32 mode = 4;
required int32 weight = 5;
}
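// With CachePoolProto gone, pool attributes travel inline as scalar fields;
// the int32 mode field carries the same octal permission bits FsPermission
// serializes. A small sketch of round-tripping that field (a hypothetical
// helper, not generated protobuf code):
public class ModeField {
  // Pack FsPermission-style bits into the int32 "mode" wire field.
  static int toWire(short permBits) {
    return permBits & 07777;
  }

  // Unpack the wire value, e.g. for new FsPermission((short) ...).
  static short fromWire(int mode) {
    return (short) (mode & 07777);
  }

  public static void main(String[] args) {
    int wire = toWire((short) 0755);
    System.out.println(Integer.toOctalString(wire));            // 755
    System.out.println(Integer.toOctalString(fromWire(wire)));  // 755
  }
}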
message GetFileLinkInfoRequestProto {


@ -17,7 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*;
import java.io.IOException;
import java.security.PrivilegedExceptionAction;
@ -31,64 +31,58 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.Fallible;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;
public class TestPathCacheRequests {
static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class);
private static final UserGroupInformation unprivilegedUser =
UserGroupInformation.createRemoteUser("unprivilegedUser");
@Test
public void testCreateAndRemovePools() throws Exception {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
NamenodeProtocols proto = cluster.getNameNodeRpc();
CachePoolInfo req = new CachePoolInfo("pool1").
setOwnerName("bob").setGroupName("bobgroup").
setMode(new FsPermission((short)0755)).setWeight(150);
proto.addCachePool(req);
try {
proto.removeCachePool("pool99");
Assert.fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("can't remove " +
"nonexistent cache pool", ioe);
}
proto.removeCachePool("pool1");
try {
proto.removeCachePool("pool1");
Assert.fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("can't remove " +
"nonexistent cache pool", ioe);
}
req = new CachePoolInfo("pool2");
proto.addCachePool(req);
@ -96,42 +90,36 @@ public void testCreateAndRemovePools() throws Exception {
@Test
public void testCreateAndModifyPools() throws Exception {
// Create a new pool
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;
// set low limits here for testing purposes
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_POOLS_NUM_RESPONSES, 2);
conf.setInt(DFSConfigKeys.DFS_NAMENODE_LIST_CACHE_DIRECTIVES_NUM_RESPONSES, 2);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
NamenodeProtocols proto = cluster.getNameNodeRpc();
proto.addCachePool(new CachePoolInfo("pool1").
setOwnerName("abc").setGroupName("123").
setMode(new FsPermission((short)0755)).setWeight(150));
proto.modifyCachePool(new CachePoolInfo("pool1").
setOwnerName("def").setGroupName("456"));
RemoteIterator<CachePoolInfo> iter = proto.listCachePools("");
CachePoolInfo info = iter.next();
assertEquals("pool1", info.getPoolName());
assertEquals("def", info.getOwnerName());
assertEquals("456", info.getGroupName());
assertEquals(new FsPermission((short)0755), info.getMode());
assertEquals(Integer.valueOf(150), info.getWeight());
try {
proto.removeCachePool("pool99");
Assert.fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
}
proto.removeCachePool("pool1");
try {
proto.removeCachePool("pool1");
Assert.fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
@ -142,13 +130,13 @@ private static void validateListAll(
RemoteIterator<PathCacheEntry> iter,
long id0, long id1, long id2) throws Exception {
Assert.assertEquals(new PathCacheEntry(id0,
new PathCacheDirective("/alpha", 1)),
new PathCacheDirective("/alpha", "pool1")),
iter.next());
Assert.assertEquals(new PathCacheEntry(id1,
new PathCacheDirective("/beta", 2)),
new PathCacheDirective("/beta", "pool2")),
iter.next());
Assert.assertEquals(new PathCacheEntry(id2,
new PathCacheDirective("/gamma", 1)),
new PathCacheDirective("/gamma", "pool1")),
iter.next());
Assert.assertFalse(iter.hasNext());
}
@ -161,36 +149,34 @@ public void testSetAndGet() throws Exception {
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive();
final CachePool pool1 = proto.addCachePool(new CachePoolInfo("pool1"));
final CachePool pool2 = proto.addCachePool(new CachePoolInfo("pool2"));
final CachePool pool3 = proto.addCachePool(new CachePoolInfo("pool3"));
final CachePool pool4 = proto.addCachePool(CachePoolInfo.newBuilder()
.setPoolName("pool4")
.setMode(new FsPermission((short)0)).build());
UserGroupInformation testUgi = UserGroupInformation
.createUserForTesting("myuser", new String[]{"mygroup"});
List<Fallible<PathCacheEntry>> addResults1 = testUgi.doAs(
new PrivilegedExceptionAction<List<Fallible<PathCacheEntry>>>() {
@Override
public List<Fallible<PathCacheEntry>> run() throws IOException {
List<Fallible<PathCacheEntry>> entries;
entries = proto.addPathCacheDirectives(
Arrays.asList(new PathCacheDirective[] {
new PathCacheDirective("/alpha", pool1.getId()),
new PathCacheDirective("/beta", pool2.getId()),
new PathCacheDirective("", pool3.getId()),
new PathCacheDirective("/zeta", 404),
new PathCacheDirective("/zeta", pool4.getId())
}));
return entries;
final NamenodeProtocols proto = cluster.getNameNodeRpc();
proto.addCachePool(new CachePoolInfo("pool1").
setMode(new FsPermission((short)0777)));
proto.addCachePool(new CachePoolInfo("pool2").
setMode(new FsPermission((short)0777)));
proto.addCachePool(new CachePoolInfo("pool3").
setMode(new FsPermission((short)0777)));
proto.addCachePool(new CachePoolInfo("pool4").
setMode(new FsPermission((short)0)));
List<Fallible<PathCacheEntry>> addResults1 =
unprivilegedUser.doAs(new PrivilegedExceptionAction<
List<Fallible<PathCacheEntry>>>() {
@Override
public List<Fallible<PathCacheEntry>> run() throws IOException {
return proto.addPathCacheDirectives(Arrays.asList(
new PathCacheDirective[] {
new PathCacheDirective("/alpha", "pool1"),
new PathCacheDirective("/beta", "pool2"),
new PathCacheDirective("", "pool3"),
new PathCacheDirective("/zeta", "nonexistent_pool"),
new PathCacheDirective("/zeta", "pool4")
}));
}
});
// Save the successful additions
long ids1[] = new long[2];
ids1[0] = addResults1.get(0).get().getEntryId();
ids1[1] = addResults1.get(1).get().getEntryId();
// Verify that the unsuccessful additions failed properly
try {
addResults1.get(2).get();
Assert.fail("expected an error when adding an empty path");
@ -201,7 +187,7 @@ public List<Fallible<PathCacheEntry>> run() throws IOException {
addResults1.get(3).get();
Assert.fail("expected an error when adding to a nonexistent pool.");
} catch (IOException ioe) {
Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError);
}
try {
addResults1.get(4).get();
@ -215,10 +201,10 @@ public List<Fallible<PathCacheEntry>> run() throws IOException {
List<Fallible<PathCacheEntry>> addResults2 =
proto.addPathCacheDirectives(Arrays.asList(
new PathCacheDirective[] {
new PathCacheDirective("/alpha", pool1.getId()),
new PathCacheDirective("/theta", 404),
new PathCacheDirective("bogus", pool1.getId()),
new PathCacheDirective("/gamma", pool1.getId())
new PathCacheDirective("/alpha", "pool1"),
new PathCacheDirective("/theta", ""),
new PathCacheDirective("bogus", "pool1"),
new PathCacheDirective("/gamma", "pool1")
}));
long id = addResults2.get(0).get().getEntryId();
Assert.assertEquals("expected to get back the same ID as last time " +
@ -228,7 +214,7 @@ public List<Fallible<PathCacheEntry>> run() throws IOException {
Assert.fail("expected an error when adding a path cache " +
"directive with an empty pool name.");
} catch (IOException ioe) {
Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError);
}
try {
addResults2.get(2).get();
@ -240,16 +226,14 @@ public List<Fallible<PathCacheEntry>> run() throws IOException {
long ids2[] = new long[1];
ids2[0] = addResults2.get(3).get().getEntryId();
// Validate listing all entries
RemoteIterator<PathCacheEntry> iter =
proto.listPathCacheEntries(0, "");
validateListAll(iter, ids1[0], ids1[1], ids2[0]);
iter = proto.listPathCacheEntries(0, "");
validateListAll(iter, ids1[0], ids1[1], ids2[0]);
// Validate listing certain pools
iter = proto.listPathCacheEntries(0, "pool3");
Assert.assertFalse(iter.hasNext());
iter = proto.listPathCacheEntries(0, "pool2");
Assert.assertEquals(addResults1.get(1).get(),
iter.next());
Assert.assertFalse(iter.hasNext());
@ -271,7 +255,7 @@ public List<Fallible<PathCacheEntry>> run() throws IOException {
} catch (IOException ioe) {
Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException);
}
iter = proto.listPathCacheEntries(0, "pool2");
Assert.assertFalse(iter.hasNext());
} finally {
if (cluster != null) { cluster.shutdown(); }