commit correct version of HDFS-5121

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1520090 13f79535-47bb-0310-9956-ffa450edef68
Colin McCabe 2013-09-04 18:23:51 +00:00
parent 97b7267977
commit d56d0b46e1
16 changed files with 828 additions and 684 deletions

View File

@@ -29,7 +29,7 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
     public E get(int i);
     public int size();
   }
   public static class BatchedListEntries<E> implements BatchedEntries<E> {
     private final List<E> entries;
@@ -39,7 +39,6 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
     public E get(int i) {
       return entries.get(i);
     }
     public int size() {
@@ -47,13 +46,13 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
     }
   }
-  private K nextKey;
+  private K prevKey;
   private final int maxRepliesPerRequest;
   private BatchedEntries<E> entries;
   private int idx;
-  public BatchedRemoteIterator(K nextKey, int maxRepliesPerRequest) {
-    this.nextKey = nextKey;
+  public BatchedRemoteIterator(K prevKey, int maxRepliesPerRequest) {
+    this.prevKey = prevKey;
     this.maxRepliesPerRequest = maxRepliesPerRequest;
     this.entries = null;
     this.idx = -1;
@@ -66,13 +65,13 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
    * @param maxRepliesPerRequest The maximum number of replies to allow.
    * @return A list of replies.
    */
-  public abstract BatchedEntries<E> makeRequest(K nextKey, int maxRepliesPerRequest)
-      throws IOException;
+  public abstract BatchedEntries<E> makeRequest(K prevKey,
+      int maxRepliesPerRequest) throws IOException;
   private void makeRequest() throws IOException {
     idx = 0;
     entries = null;
-    entries = makeRequest(nextKey, maxRepliesPerRequest);
+    entries = makeRequest(prevKey, maxRepliesPerRequest);
     if (entries.size() > maxRepliesPerRequest) {
       throw new IOException("invalid number of replies returned: got " +
           entries.size() + ", expected " + maxRepliesPerRequest +
@@ -106,7 +105,7 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
   /**
    * Return the next list key associated with an element.
    */
-  public abstract K elementToNextKey(E element);
+  public abstract K elementToPrevKey(E element);
   @Override
   public E next() throws IOException {
@@ -115,7 +114,7 @@ public abstract class BatchedRemoteIterator<K, E> implements RemoteIterator<E> {
       throw new NoSuchElementException();
     }
     E entry = entries.get(idx++);
-    nextKey = elementToNextKey(entry);
+    prevKey = elementToPrevKey(entry);
     return entry;
   }
 }
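The nextKey-to-prevKey rename above clarifies the paging contract: the iterator remembers the key of the last element it returned and passes that key back to makeRequest() to fetch the batch that follows it. A minimal toy subclass, sketched against the patched API (the number-listing class is hypothetical, the local loop stands in for a remote server, and BatchedListEntries is assumed to accept a List in its constructor):

    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    /** Toy iterator over the numbers 0..99, fetched in batches. */
    class NumberIterator extends BatchedRemoteIterator<Long, Long> {
      NumberIterator(int maxRepliesPerRequest) {
        super(-1L, maxRepliesPerRequest); // prevKey = -1 means "start of listing"
      }

      @Override
      public BatchedEntries<Long> makeRequest(Long prevKey,
          int maxRepliesPerRequest) throws IOException {
        // Return up to maxRepliesPerRequest elements whose keys follow prevKey,
        // the way a remote server would answer one paged RPC.
        List<Long> batch = new ArrayList<Long>();
        for (long v = prevKey + 1;
            v < 100 && batch.size() < maxRepliesPerRequest; v++) {
          batch.add(v);
        }
        return new BatchedListEntries<Long>(batch);
      }

      @Override
      public Long elementToPrevKey(Long element) {
        return element; // the element itself is the resume key for the next batch
      }
    }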

View File

@@ -56,12 +56,12 @@ public abstract class AddPathCacheDirectiveException extends IOException {
     }
   }
-  public static class InvalidPoolNameError
+  public static class InvalidPoolError
       extends AddPathCacheDirectiveException {
     private static final long serialVersionUID = 1L;
-    public InvalidPoolNameError(PathCacheDirective directive) {
-      super("invalid pool name '" + directive.getPool() + "'", directive);
+    public InvalidPoolError(PathCacheDirective directive) {
+      super("invalid pool id " + directive.getPoolId(), directive);
     }
   }
@@ -70,7 +70,7 @@ public abstract class AddPathCacheDirectiveException extends IOException {
     private static final long serialVersionUID = 1L;
     public PoolWritePermissionDeniedError(PathCacheDirective directive) {
-      super("write permission denied for pool '" + directive.getPool() + "'",
+      super("write permission denied for pool id " + directive.getPoolId(),
           directive);
     }
   }
@@ -82,7 +82,9 @@ public abstract class AddPathCacheDirectiveException extends IOException {
     public UnexpectedAddPathCacheDirectiveException(
         PathCacheDirective directive) {
       super("encountered an unexpected error when trying to " +
-          "add path cache directive " + directive, directive);
+          "add path cache directive to pool id " + directive.getPoolId() +
+          " " + directive,
+          directive);
     }
   }
 };
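Each subclass carries the offending directive, so a caller can report failures per directive when unwrapping the Fallible list returned by addPathCacheDirectives(). A hedged sketch, assuming Fallible.get() rethrows the stored IOException (as in the branch's util class) and that 'namenode' is a ClientProtocol proxy:

    List<Fallible<PathCacheEntry>> results =
        namenode.addPathCacheDirectives(directives);
    for (int i = 0; i < results.size(); i++) {
      try {
        PathCacheEntry entry = results.get(i).get(); // rethrows per-directive errors
        System.out.println("cached as entry " + entry.getEntryId());
      } catch (InvalidPoolError e) {
        System.err.println("directive " + i + ": bad pool id: " + e.getMessage());
      } catch (IOException e) {
        System.err.println("directive " + i + " failed: " + e.getMessage());
      }
    }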

View File

@@ -18,35 +18,45 @@
 package org.apache.hadoop.hdfs.protocol;
-import javax.annotation.Nullable;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsPermission;
+import com.google.common.base.Preconditions;
 /**
  * Information about a cache pool.
+ *
+ * CachePoolInfo permissions roughly map to Unix file permissions.
+ * Write permissions allow addition and removal of a {@link PathCacheEntry} from
+ * the pool. Execute permissions allow listing of PathCacheEntries in a pool.
+ * Read permissions have no associated meaning.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Evolving
 public class CachePoolInfo {
-  final String poolName;
-  @Nullable
-  String ownerName;
-  @Nullable
-  String groupName;
-  @Nullable
-  Integer mode;
-  @Nullable
-  Integer weight;
+  private String poolName;
+  private String ownerName;
+  private String groupName;
+  private FsPermission mode;
+  private Integer weight;
+  /**
+   * For Builder use
+   */
+  private CachePoolInfo() {}
+  /**
+   * Use a CachePoolInfo {@link Builder} to create a new CachePoolInfo with
+   * more parameters
+   */
   public CachePoolInfo(String poolName) {
     this.poolName = poolName;
   }
   public String getPoolName() {
     return poolName;
   }
@@ -55,35 +65,103 @@ public class CachePoolInfo {
     return ownerName;
   }
-  public CachePoolInfo setOwnerName(String ownerName) {
-    this.ownerName = ownerName;
-    return this;
-  }
   public String getGroupName() {
     return groupName;
   }
-  public CachePoolInfo setGroupName(String groupName) {
-    this.groupName = groupName;
-    return this;
-  }
-  public Integer getMode() {
+  public FsPermission getMode() {
     return mode;
   }
-  public CachePoolInfo setMode(Integer mode) {
-    this.mode = mode;
-    return this;
-  }
   public Integer getWeight() {
     return weight;
   }
-  public CachePoolInfo setWeight(Integer weight) {
-    this.weight = weight;
-    return this;
+  public String toString() {
+    return new StringBuilder().
+        append("{ ").append("poolName:").append(poolName).
+        append(", ownerName:").append(ownerName).
+        append(", groupName:").append(groupName).
+        append(", mode:").append(mode).
+        append(", weight:").append(weight).
+        append(" }").toString();
   }
+  @Override
+  public int hashCode() {
+    return new HashCodeBuilder().append(poolName).append(ownerName)
+        .append(groupName).append(mode.toShort()).append(weight).hashCode();
+  }
+  @Override
+  public boolean equals(Object obj) {
+    if (obj == null) { return false; }
+    if (obj == this) { return true; }
+    if (obj.getClass() != getClass()) {
+      return false;
+    }
+    CachePoolInfo rhs = (CachePoolInfo)obj;
+    return new EqualsBuilder()
+        .append(poolName, rhs.poolName)
+        .append(ownerName, rhs.ownerName)
+        .append(groupName, rhs.groupName)
+        .append(mode, rhs.mode)
+        .append(weight, rhs.weight)
+        .isEquals();
+  }
+  public static Builder newBuilder() {
+    return new Builder();
+  }
+  public static Builder newBuilder(CachePoolInfo info) {
+    return new Builder(info);
+  }
+  /**
+   * CachePoolInfo Builder
+   */
+  public static class Builder {
+    private CachePoolInfo info;
+    public Builder() {
+      this.info = new CachePoolInfo();
+    }
+    public Builder(CachePoolInfo info) {
+      this.info = info;
+    }
+    public CachePoolInfo build() {
+      Preconditions.checkNotNull(info.poolName,
+          "Cannot create a CachePoolInfo without a pool name");
+      return info;
+    }
+    public Builder setPoolName(String poolName) {
+      info.poolName = poolName;
+      return this;
+    }
+    public Builder setOwnerName(String ownerName) {
+      info.ownerName = ownerName;
+      return this;
+    }
+    public Builder setGroupName(String groupName) {
+      info.groupName = groupName;
+      return this;
+    }
+    public Builder setMode(FsPermission mode) {
+      info.mode = mode;
+      return this;
+    }
+    public Builder setWeight(Integer weight) {
+      info.weight = weight;
+      return this;
+    }
+  }
 }
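With the setters gone, construction goes through the Builder, and build() enforces the one hard requirement: a non-null pool name. A usage sketch with illustrative values; note from the code above that Builder(info) wraps the existing object rather than copying it, so newBuilder(info) edits that object in place:

    CachePoolInfo info = CachePoolInfo.newBuilder()
        .setPoolName("research")                 // required; build() checks this
        .setOwnerName("alice")
        .setGroupName("datasci")
        .setMode(new FsPermission((short) 0755))
        .setWeight(100)
        .build();

    // Adjusting one field of an existing description. Because the Builder
    // wraps rather than copies, "updated" and "info" are the same object.
    CachePoolInfo updated = CachePoolInfo.newBuilder(info)
        .setWeight(200)
        .build();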

View File

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -1099,98 +1100,82 @@ public interface ClientProtocol {
   /**
    * Add some path cache directives to the CacheManager.
    *
-   * @param directives
-   *          A list of all the path cache directives we want to add.
-   * @return
-   *          An list where each element is either a path cache entry that was
-   *          added, or an IOException exception describing why the directive
-   *          could not be added.
+   * @param directives A list of path cache directives to be added.
+   * @return A Fallible list, where each element is either a successfully addded
+   *           path cache entry, or an IOException describing why the directive
+   *           could not be added.
    */
   @AtMostOnce
-  public List<Fallible<PathCacheEntry>>
-      addPathCacheDirectives(List<PathCacheDirective> directives)
-      throws IOException;
+  public List<Fallible<PathCacheEntry>> addPathCacheDirectives(
+      List<PathCacheDirective> directives) throws IOException;
   /**
    * Remove some path cache entries from the CacheManager.
    *
-   * @param ids
-   *          A list of all the IDs we want to remove from the CacheManager.
-   * @return
-   *          An list where each element is either an ID that was removed,
-   *          or an IOException exception describing why the ID could not be
-   *          removed.
+   * @param ids A list of all the entry IDs to be removed from the CacheManager.
+   * @return A Fallible list where each element is either a successfully removed
+   *           ID, or an IOException describing why the ID could not be removed.
    */
-  @AtMostOnce
+  @Idempotent
   public List<Fallible<Long>> removePathCacheEntries(List<Long> ids)
       throws IOException;
   /**
-   * List cached paths on the server.
+   * List the set of cached paths of a cache pool. Incrementally fetches results
+   * from the server.
    *
-   * @param prevId
-   *          The previous ID that we listed, or 0 if this is the first call
-   *          to listPathCacheEntries.
-   * @param pool
-   *          The pool ID to list. If this is the empty string, all pool ids
-   *          will be listed.
-   * @param maxRepliesPerRequest
-   *          The maximum number of replies to make in each request.
-   * @return
-   *          A RemoteIterator from which you can get PathCacheEntry objects.
-   *          Requests will be made as needed.
+   * @param prevId The last listed entry ID, or -1 if this is the first call to
+   *          listPathCacheEntries.
+   * @param pool The cache pool to list, or -1 to list all pools
+   * @param maxRepliesPerRequest The maximum number of entries to return per
+   *          request
+   * @return A RemoteIterator which returns PathCacheEntry objects.
    */
   @Idempotent
   public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
-      String pool, int maxRepliesPerRequest) throws IOException;
+      long poolId, int maxRepliesPerRequest) throws IOException;
-  /**
-   * Modify a cache pool.
-   *
-   * @param req
-   *          The request to modify a cache pool.
-   * @throws IOException
-   *          If the request could not be completed.
-   */
-  @AtMostOnce
-  public void addCachePool(CachePoolInfo info) throws IOException;
   /**
-   * Modify a cache pool.
+   * Add a new cache pool.
    *
-   * @param req
-   *          The request to modify a cache pool.
-   * @throws IOException
-   *          If the request could not be completed.
+   * @param info Description of the new cache pool
+   * @throws IOException If the request could not be completed.
    */
-  @Idempotent
-  public void modifyCachePool(CachePoolInfo req) throws IOException;
+  @AtMostOnce
+  public CachePool addCachePool(CachePoolInfo info) throws IOException;
+  /**
+   * Modify a cache pool, e.g. pool name, permissions, owner, group.
+   *
+   * @param poolId ID of the cache pool to modify
+   * @param info New metadata for the cache pool
+   * @throws IOException If the request could not be completed.
+   */
+  @AtMostOnce
+  public void modifyCachePool(long poolId, CachePoolInfo info)
+      throws IOException;
   /**
    * Remove a cache pool.
    *
-   * @param cachePoolName
-   *          Name of the cache pool to remove.
-   * @throws IOException
-   *          if the cache pool did not exist, or could not be removed.
-   */
-  @AtMostOnce
-  public void removeCachePool(String cachePoolName) throws IOException;
-  /**
-   * List some cache pools.
-   *
-   * @param prevKey
-   *          The previous key we listed. We will list keys greater than this.
-   * @param maxRepliesPerRequest
-   *          Maximum number of cache pools to list.
-   * @return A remote iterator from which you can get CachePool objects.
-   *          Requests will be made as needed.
-   * @throws IOException
-   *          If there was an error listing cache pools.
+   * @param poolId ID of the cache pool to remove.
+   * @throws IOException if the cache pool did not exist, or could not be
+   *           removed.
    */
   @Idempotent
-  public RemoteIterator<CachePoolInfo> listCachePools(String prevKey,
+  public void removeCachePool(long poolId) throws IOException;
+  /**
+   * List the set of cache pools. Incrementally fetches results from the server.
+   *
+   * @param prevPoolId ID of the last pool listed, or -1 if this is the first
+   *          invocation of listCachePools
+   * @param maxRepliesPerRequest Maximum number of cache pools to return per
+   *          server request.
+   * @return A RemoteIterator which returns CachePool objects.
+   */
+  @Idempotent
+  public RemoteIterator<CachePool> listCachePools(long prevPoolId,
       int maxRepliesPerRequest) throws IOException;
 }
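Both listing calls now page through results behind a RemoteIterator keyed by numeric IDs, so callers never deal with batching directly. A sketch of a caller, following the -1 sentinels documented above ('namenode' is a ClientProtocol proxy and the IDs are illustrative):

    // List every cached path in pool 42, fetching at most 64 entries per RPC.
    RemoteIterator<PathCacheEntry> entries =
        namenode.listPathCacheEntries(-1, 42, 64);
    while (entries.hasNext()) {
      PathCacheEntry entry = entries.next();
      System.out.println(entry.getEntryId() + ": "
          + entry.getDirective().getPath());
    }

    // Enumerate all cache pools, 16 per request.
    RemoteIterator<CachePool> pools = namenode.listCachePools(-1, 16);
    while (pools.hasNext()) {
      System.out.println(pools.next().getInfo().getPoolName());
    }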

View File

@@ -25,7 +25,7 @@ import com.google.common.collect.ComparisonChain;
 import org.apache.commons.lang.builder.HashCodeBuilder;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
-import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
+import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
 /**
@@ -33,14 +33,13 @@ import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPat
  */
 public class PathCacheDirective implements Comparable<PathCacheDirective> {
   private final String path;
-  private final String pool;
-  public PathCacheDirective(String path, String pool) {
+  private final long poolId;
+  public PathCacheDirective(String path, long poolId) {
     Preconditions.checkNotNull(path);
-    Preconditions.checkNotNull(pool);
+    Preconditions.checkArgument(poolId > 0);
     this.path = path;
-    this.pool = pool;
+    this.poolId = poolId;
   }
   /**
@@ -53,8 +52,8 @@ public class PathCacheDirective implements Comparable<PathCacheDirective> {
   /**
    * @return The pool used in this request.
    */
-  public String getPool() {
-    return pool;
+  public long getPoolId() {
+    return poolId;
   }
   /**
@@ -70,22 +69,22 @@ public class PathCacheDirective implements Comparable<PathCacheDirective> {
     if (!DFSUtil.isValidName(path)) {
       throw new InvalidPathNameError(this);
     }
-    if (pool.isEmpty()) {
-      throw new InvalidPoolNameError(this);
+    if (poolId <= 0) {
+      throw new InvalidPoolError(this);
     }
   }
   @Override
   public int compareTo(PathCacheDirective rhs) {
     return ComparisonChain.start().
-        compare(pool, rhs.getPool()).
+        compare(poolId, rhs.getPoolId()).
         compare(path, rhs.getPath()).
         result();
   }
   @Override
   public int hashCode() {
-    return new HashCodeBuilder().append(path).append(pool).hashCode();
+    return new HashCodeBuilder().append(path).append(poolId).hashCode();
   }
   @Override
@@ -102,7 +101,7 @@ public class PathCacheDirective implements Comparable<PathCacheDirective> {
   public String toString() {
     StringBuilder builder = new StringBuilder();
     builder.append("{ path:").append(path).
-        append(", pool:").append(pool).
+        append(", poolId:").append(poolId).
         append(" }");
     return builder.toString();
   }
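Directives are now keyed by numeric pool ID: the constructor rejects non-positive IDs outright, and validate() repeats the check, which matters for directives rebuilt from the wire. A short sketch, assuming validate() is public as elsewhere on the branch:

    PathCacheDirective d = new PathCacheDirective("/warehouse/t1", 42);
    d.validate(); // throws InvalidPathNameError or InvalidPoolError on bad input

    // compareTo() orders by pool ID first, then path:
    PathCacheDirective other = new PathCacheDirective("/warehouse/t0", 43);
    assert d.compareTo(other) < 0; // pool 42 sorts before pool 43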

View File

@@ -29,8 +29,7 @@ import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
-import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -113,7 +112,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCa
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -173,7 +171,6 @@ import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.INodeId;
-import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto;
@@ -1038,16 +1035,19 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
   @Override
-  public AddPathCacheDirectivesResponseProto addPathCacheDirectives(RpcController controller,
-      AddPathCacheDirectivesRequestProto request) throws ServiceException {
+  public AddPathCacheDirectivesResponseProto addPathCacheDirectives(
+      RpcController controller, AddPathCacheDirectivesRequestProto request)
+      throws ServiceException {
     try {
       ArrayList<PathCacheDirective> input =
           new ArrayList<PathCacheDirective>(request.getElementsCount());
       for (int i = 0; i < request.getElementsCount(); i++) {
         PathCacheDirectiveProto proto = request.getElements(i);
-        input.add(new PathCacheDirective(proto.getPath(), proto.getPool()));
+        input.add(new PathCacheDirective(proto.getPath(),
+            proto.getPool().getId()));
       }
-      List<Fallible<PathCacheEntry>> output = server.addPathCacheDirectives(input);
+      List<Fallible<PathCacheEntry>> output = server
+          .addPathCacheDirectives(input);
       AddPathCacheDirectivesResponseProto.Builder builder =
           AddPathCacheDirectivesResponseProto.newBuilder();
       for (int idx = 0; idx < output.size(); idx++) {
@@ -1060,7 +1060,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
         } catch (InvalidPathNameError ioe) {
           builder.addResults(AddPathCacheDirectiveErrorProto.
               INVALID_PATH_NAME_ERROR_VALUE);
-        } catch (InvalidPoolNameError ioe) {
+        } catch (InvalidPoolError ioe) {
           builder.addResults(AddPathCacheDirectiveErrorProto.
               INVALID_POOL_NAME_ERROR_VALUE);
         } catch (IOException ioe) {
@@ -1108,22 +1108,21 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   }
   @Override
-  public ListPathCacheEntriesResponseProto listPathCacheEntries(RpcController controller,
-      ListPathCacheEntriesRequestProto request) throws ServiceException {
+  public ListPathCacheEntriesResponseProto listPathCacheEntries(
+      RpcController controller, ListPathCacheEntriesRequestProto request)
+      throws ServiceException {
     try {
+      CachePool pool = PBHelper.convert(request.getPool());
       RemoteIterator<PathCacheEntry> iter =
-          server.listPathCacheEntries(request.getPrevId(),
-              request.getPool(),
+          server.listPathCacheEntries(
+              PBHelper.convert(request.getPrevEntry()).getEntryId(),
+              pool.getId(),
               request.getMaxReplies());
       ListPathCacheEntriesResponseProto.Builder builder =
           ListPathCacheEntriesResponseProto.newBuilder();
       while (iter.hasNext()) {
         PathCacheEntry entry = iter.next();
-        builder.addElements(
-            ListPathCacheEntriesElementProto.newBuilder().
-                setId(entry.getEntryId()).
-                setPath(entry.getDirective().getPath()).
-                setPool(entry.getDirective().getPool()));
+        builder.addEntries(PBHelper.convert(entry));
       }
       return builder.build();
     } catch (IOException e) {
@@ -1135,46 +1134,20 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public AddCachePoolResponseProto addCachePool(RpcController controller,
       AddCachePoolRequestProto request) throws ServiceException {
     try {
-      CachePoolInfo info =
-          new CachePoolInfo(request.getPoolName());
-      if (request.hasOwnerName()) {
-        info.setOwnerName(request.getOwnerName());
-      }
-      if (request.hasGroupName()) {
-        info.setGroupName(request.getGroupName());
-      }
-      if (request.hasMode()) {
-        info.setMode(request.getMode());
-      }
-      if (request.hasWeight()) {
-        info.setWeight(request.getWeight());
-      }
-      server.addCachePool(info);
+      server.addCachePool(PBHelper.convert(request.getInfo()));
       return AddCachePoolResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }
   @Override
   public ModifyCachePoolResponseProto modifyCachePool(RpcController controller,
       ModifyCachePoolRequestProto request) throws ServiceException {
     try {
-      CachePoolInfo info =
-          new CachePoolInfo(request.getPoolName());
-      if (request.hasOwnerName()) {
-        info.setOwnerName(request.getOwnerName());
-      }
-      if (request.hasGroupName()) {
-        info.setGroupName(request.getGroupName());
-      }
-      if (request.hasMode()) {
-        info.setMode(request.getMode());
-      }
-      if (request.hasWeight()) {
-        info.setWeight(request.getWeight());
-      }
-      server.modifyCachePool(info);
+      server.modifyCachePool(
+          PBHelper.convert(request.getPool()).getId(),
+          PBHelper.convert(request.getInfo()));
       return ModifyCachePoolResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1185,7 +1158,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public RemoveCachePoolResponseProto removeCachePool(RpcController controller,
       RemoveCachePoolRequestProto request) throws ServiceException {
     try {
-      server.removeCachePool(request.getPoolName());
+      server.removeCachePool(PBHelper.convert(request.getPool()).getId());
       return RemoveCachePoolResponseProto.newBuilder().build();
     } catch (IOException e) {
       throw new ServiceException(e);
@@ -1196,28 +1169,16 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
   public ListCachePoolsResponseProto listCachePools(RpcController controller,
       ListCachePoolsRequestProto request) throws ServiceException {
     try {
-      RemoteIterator<CachePoolInfo> iter =
-          server.listCachePools(request.getPrevPoolName(),
+      RemoteIterator<CachePool> iter =
+          server.listCachePools(PBHelper.convert(request.getPrevPool()).getId(),
              request.getMaxReplies());
       ListCachePoolsResponseProto.Builder responseBuilder =
           ListCachePoolsResponseProto.newBuilder();
       while (iter.hasNext()) {
-        CachePoolInfo pool = iter.next();
+        CachePool pool = iter.next();
         ListCachePoolsResponseElementProto.Builder elemBuilder =
             ListCachePoolsResponseElementProto.newBuilder();
-        elemBuilder.setPoolName(pool.getPoolName());
-        if (pool.getOwnerName() != null) {
-          elemBuilder.setOwnerName(pool.getOwnerName());
-        }
-        if (pool.getGroupName() != null) {
-          elemBuilder.setGroupName(pool.getGroupName());
-        }
-        if (pool.getMode() != null) {
-          elemBuilder.setMode(pool.getMode());
-        }
-        if (pool.getWeight() != null) {
-          elemBuilder.setWeight(pool.getWeight());
-        }
+        elemBuilder.setPool(PBHelper.convert(pool));
         responseBuilder.addElements(elemBuilder.build());
       }
       return responseBuilder.build();

View File

@@ -23,7 +23,6 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
-import java.util.NoSuchElementException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -38,17 +37,12 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
-import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
-import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
+import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
 import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
-import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
-import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
-import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -61,14 +55,18 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
+import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
+import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
+import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
+import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
+import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
+import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AbandonBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectiveErrorProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddPathCacheDirectivesResponseProto;
@@ -109,23 +107,23 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSna
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetSnapshottableDirListingResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.IsFileClosedRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesElementProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseElementProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListPathCacheEntriesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MkdirsRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RecoverLeaseRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RefreshNodesRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntriesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemovePathCacheEntryErrorProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RemoveCachePoolRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto;
@@ -146,6 +144,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Update
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.UpdatePipelineRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.namenode.CachePool;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
@@ -1027,7 +1026,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
       return new InvalidPathNameError(directive);
     } else if (code == AddPathCacheDirectiveErrorProto.
         INVALID_POOL_NAME_ERROR_VALUE) {
-      return new InvalidPoolNameError(directive);
+      return new InvalidPoolError(directive);
     } else {
       return new UnexpectedAddPathCacheDirectiveException(directive);
     }
@@ -1042,7 +1041,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     for (PathCacheDirective directive : directives) {
       builder.addElements(PathCacheDirectiveProto.newBuilder().
           setPath(directive.getPath()).
-          setPool(directive.getPool()).
+          setPool(PBHelper.convert(new CachePool(directive.getPoolId()))).
           build());
     }
     AddPathCacheDirectivesResponseProto result =
@@ -1121,42 +1120,40 @@ public class ClientNamenodeProtocolTranslatorPB implements
     @Override
     public PathCacheEntry get(int i) {
-      ListPathCacheEntriesElementProto elementProto =
-          response.getElements(i);
-      return new PathCacheEntry(elementProto.getId(),
-          new PathCacheDirective(elementProto.getPath(),
-              elementProto.getPool()));
+      PathCacheEntryProto entryProto = response.getEntries(i);
+      return PBHelper.convert(entryProto);
     }
     @Override
     public int size() {
-      return response.getElementsCount();
+      return response.getEntriesCount();
     }
   }
   private class PathCacheEntriesIterator
       extends BatchedRemoteIterator<Long, PathCacheEntry> {
-    private final String pool;
+    private final long poolId;
     public PathCacheEntriesIterator(long prevKey, int maxRepliesPerRequest,
-        String pool) {
+        long poolId) {
       super(prevKey, maxRepliesPerRequest);
-      this.pool = pool;
+      this.poolId = poolId;
     }
     @Override
     public BatchedEntries<PathCacheEntry> makeRequest(
-        Long nextKey, int maxRepliesPerRequest) throws IOException {
+        Long prevEntryId, int maxRepliesPerRequest) throws IOException {
       ListPathCacheEntriesResponseProto response;
       try {
         ListPathCacheEntriesRequestProto req =
             ListPathCacheEntriesRequestProto.newBuilder().
-                setPrevId(nextKey).
-                setPool(pool).
+                setPrevEntry(
+                    PBHelper.convert(new PathCacheEntry(prevEntryId, null))).
+                setPool(PBHelper.convert(new CachePool(poolId))).
                setMaxReplies(maxRepliesPerRequest).
                build();
         response = rpcProxy.listPathCacheEntries(null, req);
-        if (response.getElementsCount() == 0) {
+        if (response.getEntriesCount() == 0) {
          response = null;
        }
      } catch (ServiceException e) {
@@ -1166,58 +1163,37 @@ public class ClientNamenodeProtocolTranslatorPB implements
    }
    @Override
-    public Long elementToNextKey(PathCacheEntry element) {
+    public Long elementToPrevKey(PathCacheEntry element) {
      return element.getEntryId();
    }
  }
  @Override
  public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
-      String pool, int repliesPerRequest) throws IOException {
-    return new PathCacheEntriesIterator(prevId, repliesPerRequest, pool);
+      long poolId, int repliesPerRequest) throws IOException {
+    return new PathCacheEntriesIterator(prevId, repliesPerRequest, poolId);
  }
  @Override
-  public void addCachePool(CachePoolInfo info) throws IOException {
+  public CachePool addCachePool(CachePoolInfo info) throws IOException {
    AddCachePoolRequestProto.Builder builder =
        AddCachePoolRequestProto.newBuilder();
-    builder.setPoolName(info.getPoolName());
-    if (info.getOwnerName() != null) {
-      builder.setOwnerName(info.getOwnerName());
-    }
-    if (info.getGroupName() != null) {
-      builder.setGroupName(info.getGroupName());
-    }
-    if (info.getMode() != null) {
-      builder.setMode(info.getMode());
-    }
-    if (info.getWeight() != null) {
-      builder.setWeight(info.getWeight());
-    }
+    builder.setInfo(PBHelper.convert(info));
    try {
-      rpcProxy.addCachePool(null, builder.build());
+      return PBHelper.convert(
+          rpcProxy.addCachePool(null, builder.build()).getPool());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
  @Override
-  public void modifyCachePool(CachePoolInfo req) throws IOException {
-    ModifyCachePoolRequestProto.Builder builder =
-        ModifyCachePoolRequestProto.newBuilder();
-    builder.setPoolName(req.getPoolName());
-    if (req.getOwnerName() != null) {
-      builder.setOwnerName(req.getOwnerName());
-    }
-    if (req.getGroupName() != null) {
-      builder.setGroupName(req.getGroupName());
-    }
-    if (req.getMode() != null) {
-      builder.setMode(req.getMode());
-    }
-    if (req.getWeight() != null) {
-      builder.setWeight(req.getWeight());
-    }
+  public void modifyCachePool(long poolId, CachePoolInfo info)
+      throws IOException {
+    ModifyCachePoolRequestProto.Builder builder =
+        ModifyCachePoolRequestProto.newBuilder()
+            .setPool(PBHelper.convert(new CachePool(poolId)))
+            .setInfo(PBHelper.convert(info));
    try {
      rpcProxy.modifyCachePool(null, builder.build());
    } catch (ServiceException e) {
@@ -1226,32 +1202,30 @@ public class ClientNamenodeProtocolTranslatorPB implements
  }
  @Override
-  public void removeCachePool(String cachePoolName) throws IOException {
+  public void removeCachePool(long poolId) throws IOException {
    try {
      rpcProxy.removeCachePool(null,
          RemoveCachePoolRequestProto.newBuilder().
-              setPoolName(cachePoolName).build());
+              setPool(PBHelper.convert(new CachePool(poolId))).
+              build());
    } catch (ServiceException e) {
      throw ProtobufHelper.getRemoteException(e);
    }
  }
  private static class BatchedPathDirectiveEntries
-      implements BatchedEntries<CachePoolInfo> {
+      implements BatchedEntries<CachePool> {
    private final ListCachePoolsResponseProto proto;
    public BatchedPathDirectiveEntries(ListCachePoolsResponseProto proto) {
      this.proto = proto;
    }
    @Override
-    public CachePoolInfo get(int i) {
+    public CachePool get(int i) {
      ListCachePoolsResponseElementProto elem = proto.getElements(i);
-      return new CachePoolInfo(elem.getPoolName()).
-          setOwnerName(elem.getOwnerName()).
-          setGroupName(elem.getGroupName()).
-          setMode(elem.getMode()).
-          setWeight(elem.getWeight());
+      return PBHelper.convert(elem.getPool());
    }
    @Override
@@ -1259,37 +1233,38 @@ public class ClientNamenodeProtocolTranslatorPB implements
      return proto.getElementsCount();
    }
  }
  private class CachePoolIterator
-      extends BatchedRemoteIterator<String, CachePoolInfo> {
-    public CachePoolIterator(String prevKey, int maxRepliesPerRequest) {
+      extends BatchedRemoteIterator<Long, CachePool> {
+    public CachePoolIterator(Long prevKey, int maxRepliesPerRequest) {
      super(prevKey, maxRepliesPerRequest);
    }
    @Override
-    public BatchedEntries<CachePoolInfo> makeRequest(String prevKey,
+    public BatchedEntries<CachePool> makeRequest(Long prevKey,
        int maxRepliesPerRequest) throws IOException {
      try {
        return new BatchedPathDirectiveEntries(
            rpcProxy.listCachePools(null,
                ListCachePoolsRequestProto.newBuilder().
-                    setPrevPoolName(prevKey).
-                    setMaxReplies(maxRepliesPerRequest).build()));
+                    setPrevPool(PBHelper.convert(new CachePool(prevKey))).
+                    setMaxReplies(maxRepliesPerRequest).
+                    build()));
      } catch (ServiceException e) {
        throw ProtobufHelper.getRemoteException(e);
      }
    }
    @Override
-    public String elementToNextKey(CachePoolInfo element) {
-      return element.getPoolName();
+    public Long elementToPrevKey(CachePool element) {
+      return element.getId();
    }
  }
  @Override
-  public RemoteIterator<CachePoolInfo> listCachePools(String prevKey,
+  public RemoteIterator<CachePool> listCachePools(long prevPoolId,
      int maxRepliesPerRequest) throws IOException {
-    return new CachePoolIterator(prevKey, maxRepliesPerRequest);
+    return new CachePoolIterator(prevPoolId, maxRepliesPerRequest);
  }
 }

View File

@ -32,10 +32,13 @@ import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates; import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@ -50,9 +53,15 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateFlagProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.DatanodeReportTypeProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatsResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ModifyCachePoolRequestProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheDirectiveProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.PathCacheEntryProto;
import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.SafeModeActionProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto; import org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
@ -114,6 +123,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.StorageInfo; import org.apache.hadoop.hdfs.server.common.StorageInfo;
import org.apache.hadoop.hdfs.server.namenode.CachePool;
import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature; import org.apache.hadoop.hdfs.server.namenode.CheckpointSignature;
import org.apache.hadoop.hdfs.server.namenode.INodeId; import org.apache.hadoop.hdfs.server.namenode.INodeId;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand; import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
@ -1493,6 +1503,74 @@ public class PBHelper {
return HdfsProtos.ChecksumTypeProto.valueOf(type.id); return HdfsProtos.ChecksumTypeProto.valueOf(type.id);
} }
public static PathCacheDirective convert(
PathCacheDirectiveProto directiveProto) {
CachePool pool = convert(directiveProto.getPool());
return new PathCacheDirective(directiveProto.getPath(), pool.getId());
}
public static PathCacheDirectiveProto convert(PathCacheDirective directive) {
PathCacheDirectiveProto.Builder builder =
PathCacheDirectiveProto.newBuilder()
.setPath(directive.getPath())
.setPool(PBHelper.convert(new CachePool(directive.getPoolId())));
return builder.build();
}
public static PathCacheEntry convert(PathCacheEntryProto entryProto) {
long entryId = entryProto.getId();
PathCacheDirective directive = convert(entryProto.getDirective());
return new PathCacheEntry(entryId, directive);
}
public static PathCacheEntryProto convert(PathCacheEntry entry) {
PathCacheEntryProto.Builder builder = PathCacheEntryProto.newBuilder()
.setId(entry.getEntryId())
.setDirective(PBHelper.convert(entry.getDirective()));
return builder.build();
}
public static CachePoolInfo convert(CachePoolInfoProto infoProto) {
CachePoolInfo.Builder builder =
CachePoolInfo.newBuilder().setPoolName(infoProto.getPoolName());
if (infoProto.hasOwnerName()) {
builder.setOwnerName(infoProto.getOwnerName());
}
if (infoProto.hasGroupName()) {
builder.setGroupName(infoProto.getGroupName());
}
if (infoProto.hasMode()) {
builder.setMode(new FsPermission((short) infoProto.getMode()));
}
if (infoProto.hasWeight()) {
builder.setWeight(infoProto.getWeight());
}
return builder.build();
}
public static CachePoolInfoProto convert(CachePoolInfo info) {
CachePoolInfoProto.Builder builder = CachePoolInfoProto.newBuilder()
.setPoolName(info.getPoolName())
.setOwnerName(info.getOwnerName())
.setGroupName(info.getGroupName())
.setMode(info.getMode().toShort())
.setWeight(info.getWeight());
return builder.build();
}
public static CachePool convert(CachePoolProto poolProto) {
CachePoolInfo info = convert(poolProto.getInfo());
CachePool pool = new CachePool(poolProto.getId(), info);
return pool;
}
public static CachePoolProto convert(CachePool pool) {
CachePoolProto.Builder builder = CachePoolProto.newBuilder()
.setId(pool.getId())
.setInfo(convert(pool.getInfo()));
return builder.build();
}
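These converters let CachePoolProto carry both the numeric pool id and the full CachePoolInfo payload. A minimal round-trip sketch (not part of the patch; assumes the usual imports for CachePool, CachePoolInfo, FsPermission, and the generated CachePoolProto):

CachePoolInfo info = CachePoolInfo.newBuilder()
    .setPoolName("pool1")
    .setOwnerName("bob")
    .setGroupName("bobgroup")
    .setMode(new FsPermission((short) 0755))
    .setWeight(100)
    .build();
CachePool pool = new CachePool(1, info);          // id 1 is illustrative
CachePoolProto proto = PBHelper.convert(pool);    // serializes id + info
CachePool roundTripped = PBHelper.convert(proto); // rebuilds an equal pool
assert pool.equals(roundTripped);                 // id and info both compared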
public static InputStream vintPrefixed(final InputStream input) public static InputStream vintPrefixed(final InputStream input)
throws IOException { throws IOException {
final int firstByte = input.read(); final int firstByte = input.read();

View File

@ -19,25 +19,26 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException; import java.io.IOException;
import java.util.ArrayList; import java.util.ArrayList;
import java.util.Iterator;
import java.util.List; import java.util.List;
import java.util.Map;
import java.util.SortedMap; import java.util.SortedMap;
import java.util.TreeMap; import java.util.TreeMap;
import java.util.Map.Entry;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.UnexpectedAddPathCacheDirectiveException;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.RemovePermissionDeniedException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.UnexpectedRemovePathCacheEntryException;
import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.Fallible;
/** /**
@ -64,14 +65,25 @@ final class CacheManager {
/** /**
* Cache pools, sorted by name. * Cache pools, sorted by name.
*/ */
private final TreeMap<String, CachePool> cachePools = private final TreeMap<String, CachePool> cachePoolsByName =
new TreeMap<String, CachePool>(); new TreeMap<String, CachePool>();
/**
* Cache pools, sorted by ID
*/
private final TreeMap<Long, CachePool> cachePoolsById =
new TreeMap<Long, CachePool>();
/** /**
* The entry ID to use for a new entry. * The entry ID to use for a new entry.
*/ */
private long nextEntryId; private long nextEntryId;
/**
* The pool ID to use for a new pool.
*/
private long nextPoolId;
CacheManager(FSDirectory dir, Configuration conf) { CacheManager(FSDirectory dir, Configuration conf) {
// TODO: support loading and storing of the CacheManager state // TODO: support loading and storing of the CacheManager state
clear(); clear();
@ -80,26 +92,35 @@ final class CacheManager {
synchronized void clear() { synchronized void clear() {
entriesById.clear(); entriesById.clear();
entriesByDirective.clear(); entriesByDirective.clear();
cachePoolsByName.clear();
cachePoolsById.clear();
nextEntryId = 1; nextEntryId = 1;
nextPoolId = 1;
} }
synchronized long getNextEntryId() throws IOException { synchronized long getNextEntryId() throws IOException {
if (nextEntryId == Long.MAX_VALUE) { if (nextEntryId == Long.MAX_VALUE) {
throw new IOException("no more available IDs"); throw new IOException("no more available entry IDs");
} }
return nextEntryId++; return nextEntryId++;
} }
synchronized long getNextPoolId() throws IOException {
if (nextPoolId == Long.MAX_VALUE) {
throw new IOException("no more available pool IDs");
}
return nextPoolId++;
}
private synchronized Fallible<PathCacheEntry> addDirective( private synchronized Fallible<PathCacheEntry> addDirective(
PathCacheDirective directive, FSPermissionChecker pc) { FSPermissionChecker pc, PathCacheDirective directive) {
CachePool pool = cachePools.get(directive.getPool()); CachePool pool = cachePoolsById.get(directive.getPoolId());
if (pool == null) { if (pool == null) {
LOG.info("addDirective " + directive + ": pool not found."); LOG.info("addDirective " + directive + ": pool not found.");
return new Fallible<PathCacheEntry>( return new Fallible<PathCacheEntry>(
new InvalidPoolNameError(directive)); new InvalidPoolError(directive));
} }
if (!pc.checkWritePermission(pool.getOwnerName(), if (!pc.checkPermission(pool, FsAction.WRITE)) {
pool.getGroupName(), pool.getMode())) {
LOG.info("addDirective " + directive + ": write permission denied."); LOG.info("addDirective " + directive + ": write permission denied.");
return new Fallible<PathCacheEntry>( return new Fallible<PathCacheEntry>(
new PoolWritePermissionDeniedError(directive)); new PoolWritePermissionDeniedError(directive));
@ -134,17 +155,17 @@ final class CacheManager {
} }
public synchronized List<Fallible<PathCacheEntry>> addDirectives( public synchronized List<Fallible<PathCacheEntry>> addDirectives(
List<PathCacheDirective> directives, FSPermissionChecker pc) { FSPermissionChecker pc, List<PathCacheDirective> directives) {
ArrayList<Fallible<PathCacheEntry>> results = ArrayList<Fallible<PathCacheEntry>> results =
new ArrayList<Fallible<PathCacheEntry>>(directives.size()); new ArrayList<Fallible<PathCacheEntry>>(directives.size());
for (PathCacheDirective directive: directives) { for (PathCacheDirective directive: directives) {
results.add(addDirective(directive, pc)); results.add(addDirective(pc, directive));
} }
return results; return results;
} }
private synchronized Fallible<Long> removeEntry(long entryId, private synchronized Fallible<Long> removeEntry(FSPermissionChecker pc,
FSPermissionChecker pc) { long entryId) {
// Check for invalid IDs. // Check for invalid IDs.
if (entryId <= 0) { if (entryId <= 0) {
LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID."); LOG.info("removeEntry " + entryId + ": invalid non-positive entry ID.");
@ -156,23 +177,20 @@ final class CacheManager {
LOG.info("removeEntry " + entryId + ": entry not found."); LOG.info("removeEntry " + entryId + ": entry not found.");
return new Fallible<Long>(new NoSuchIdException(entryId)); return new Fallible<Long>(new NoSuchIdException(entryId));
} }
CachePool pool = cachePools.get(existing.getDirective().getPool()); CachePool pool = cachePoolsById.get(existing.getDirective().getPoolId());
if (pool == null) { if (pool == null) {
LOG.info("removeEntry " + entryId + ": pool not found for directive " + LOG.info("removeEntry " + entryId + ": pool not found for directive " +
existing.getDirective()); existing.getDirective());
return new Fallible<Long>( return new Fallible<Long>(
new UnexpectedRemovePathCacheEntryException(entryId)); new UnexpectedRemovePathCacheEntryException(entryId));
} }
if (!pc.isSuperUser()) { if (!pc.checkPermission(pool, FsAction.WRITE)) {
if (!pc.checkWritePermission(pool.getOwnerName(), LOG.info("removeEntry " + entryId + ": write permission denied to " +
pool.getGroupName(), pool.getMode())) { "pool " + pool + " for entry " + existing);
LOG.info("removeEntry " + entryId + ": write permission denied to " + return new Fallible<Long>(
"pool " + pool + " for entry " + existing); new RemovePermissionDeniedException(entryId));
return new Fallible<Long>(
new RemovePermissionDeniedException(entryId));
}
} }
// Remove the corresponding entry in entriesByDirective. // Remove the corresponding entry in entriesByDirective.
if (entriesByDirective.remove(existing.getDirective()) == null) { if (entriesByDirective.remove(existing.getDirective()) == null) {
LOG.warn("removeEntry " + entryId + ": failed to find existing entry " + LOG.warn("removeEntry " + entryId + ": failed to find existing entry " +
@ -184,36 +202,43 @@ final class CacheManager {
return new Fallible<Long>(entryId); return new Fallible<Long>(entryId);
} }
public synchronized List<Fallible<Long>> removeEntries(List<Long> entryIds, public synchronized List<Fallible<Long>> removeEntries(FSPermissionChecker pc,
FSPermissionChecker pc) { List<Long> entryIds) {
ArrayList<Fallible<Long>> results = ArrayList<Fallible<Long>> results =
new ArrayList<Fallible<Long>>(entryIds.size()); new ArrayList<Fallible<Long>>(entryIds.size());
for (Long entryId : entryIds) { for (Long entryId : entryIds) {
results.add(removeEntry(entryId, pc)); results.add(removeEntry(pc, entryId));
} }
return results; return results;
} }
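Both batch methods return per-element Fallible results instead of failing wholesale, so one bad entry does not abort the rest. A caller-side sketch, assuming pc and ids are already in scope:

List<Fallible<Long>> results = cacheManager.removeEntries(pc, ids);
for (Fallible<Long> result : results) {
  try {
    long removedId = result.get();  // rethrows this entry's error, if any
  } catch (IOException e) {
    // e.g. InvalidIdException, NoSuchIdException, or
    // RemovePermissionDeniedException, scoped to this entry alone
  }
}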
public synchronized List<PathCacheEntry> listPathCacheEntries(long prevId, public synchronized List<PathCacheEntry> listPathCacheEntries(
String pool, int maxReplies) { FSPermissionChecker pc, long prevId, Long poolId, int maxReplies) {
final int MAX_PRE_ALLOCATED_ENTRIES = 16; final int MAX_PRE_ALLOCATED_ENTRIES = 16;
ArrayList<PathCacheEntry> replies = ArrayList<PathCacheEntry> replies = new ArrayList<PathCacheEntry>(
new ArrayList<PathCacheEntry>(Math.min(MAX_PRE_ALLOCATED_ENTRIES, maxReplies)); Math.min(MAX_PRE_ALLOCATED_ENTRIES, maxReplies));
int numReplies = 0; int numReplies = 0;
SortedMap<Long, PathCacheEntry> tailMap = entriesById.tailMap(prevId + 1); SortedMap<Long, PathCacheEntry> tailMap = entriesById.tailMap(prevId + 1);
for (Entry<Long, PathCacheEntry> cur : tailMap.entrySet()) { for (PathCacheEntry entry : tailMap.values()) {
if (numReplies >= maxReplies) { if (numReplies >= maxReplies) {
return replies; return replies;
} }
if (pool.isEmpty() || cur.getValue().getDirective(). long entryPoolId = entry.getDirective().getPoolId();
getPool().equals(pool)) { if (poolId == null || poolId <= 0 || entryPoolId == poolId) {
replies.add(cur.getValue()); if (pc.checkPermission(
numReplies++; cachePoolsById.get(entryPoolId), FsAction.EXECUTE)) {
replies.add(entry);
numReplies++;
}
} }
} }
return replies; return replies;
} }
synchronized CachePool getCachePool(long id) {
return cachePoolsById.get(id);
}
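listPathCacheEntries scans entriesById in id order starting strictly after prevId, applies the optional pool filter (a null or non-positive poolId matches every pool), and returns only entries whose pool the caller may EXECUTE. A paging sketch, assuming cacheManager, pc, and poolId are in scope:

long prevId = 0;                    // ids start at 1, so 0 scans from the top
List<PathCacheEntry> batch;
do {
  batch = cacheManager.listPathCacheEntries(pc, prevId, poolId, 64);
  for (PathCacheEntry entry : batch) {
    prevId = entry.getEntryId();    // resume point for the next batch
  }
} while (!batch.isEmpty());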
/** /**
* Create a cache pool. * Create a cache pool.
* *
@ -221,22 +246,24 @@ final class CacheManager {
* *
* @param info * @param info
* The info for the cache pool to create. * The info for the cache pool to create.
* @return created CachePool
*/ */
public synchronized void addCachePool(CachePoolInfo info) public synchronized CachePool addCachePool(CachePoolInfo info)
throws IOException { throws IOException {
String poolName = info.getPoolName(); String poolName = info.getPoolName();
if (poolName.isEmpty()) { if (poolName == null || poolName.isEmpty()) {
throw new IOException("invalid empty cache pool name"); throw new IOException("invalid empty cache pool name");
} }
CachePool pool = cachePools.get(poolName); if (cachePoolsByName.containsKey(poolName)) {
if (pool != null) {
throw new IOException("cache pool " + poolName + " already exists."); throw new IOException("cache pool " + poolName + " already exists.");
} }
CachePool cachePool = new CachePool(poolName, CachePool cachePool = new CachePool(getNextPoolId(), poolName,
info.getOwnerName(), info.getGroupName(), info.getMode(), info.getOwnerName(), info.getGroupName(), info.getMode(),
info.getWeight()); info.getWeight());
cachePools.put(poolName, cachePool); cachePoolsById.put(cachePool.getId(), cachePool);
cachePoolsByName.put(poolName, cachePool);
LOG.info("created new cache pool " + cachePool); LOG.info("created new cache pool " + cachePool);
return cachePool;
} }
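The returned CachePool exposes the server-assigned id, which callers need for the id-based modify and remove operations below. A sketch:

CachePool pool = cacheManager.addCachePool(new CachePoolInfo("pool1"));
long poolId = pool.getId();   // assigned from nextPoolId, starting at 1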
/** /**
@ -247,46 +274,62 @@ final class CacheManager {
* @param info * @param info
* The info for the cache pool to modify. * The info for the cache pool to modify.
*/ */
public synchronized void modifyCachePool(CachePoolInfo info) public synchronized void modifyCachePool(long poolId, CachePoolInfo info)
throws IOException { throws IOException {
String poolName = info.getPoolName(); if (poolId <= 0) {
if (poolName.isEmpty()) { throw new IOException("invalid pool id " + poolId);
throw new IOException("invalid empty cache pool name");
} }
CachePool pool = cachePools.get(poolName); if (!cachePoolsById.containsKey(poolId)) {
if (pool == null) { throw new IOException("cache pool id " + poolId + " does not exist.");
throw new IOException("cache pool " + poolName + " does not exist.");
} }
CachePool pool = cachePoolsById.get(poolId);
// Remove the old CachePoolInfo
removeCachePool(poolId);
// Build up the new CachePoolInfo
CachePoolInfo.Builder newInfo = CachePoolInfo.newBuilder(pool.getInfo());
StringBuilder bld = new StringBuilder(); StringBuilder bld = new StringBuilder();
String prefix = ""; String prefix = "";
if (info.getPoolName() != null) {
newInfo.setPoolName(info.getPoolName());
bld.append(prefix).
append("set name to ").append(info.getOwnerName());
prefix = "; ";
}
if (info.getOwnerName() != null) { if (info.getOwnerName() != null) {
pool.setOwnerName(info.getOwnerName()); newInfo.setOwnerName(info.getOwnerName());
bld.append(prefix). bld.append(prefix).
append("set owner to ").append(info.getOwnerName()); append("set owner to ").append(info.getOwnerName());
prefix = "; "; prefix = "; ";
} }
if (info.getGroupName() != null) { if (info.getGroupName() != null) {
pool.setGroupName(info.getGroupName()); newInfo.setGroupName(info.getGroupName());
bld.append(prefix). bld.append(prefix).
append("set group to ").append(info.getGroupName()); append("set group to ").append(info.getGroupName());
prefix = "; "; prefix = "; ";
} }
if (info.getMode() != null) { if (info.getMode() != null) {
pool.setMode(info.getMode()); newInfo.setMode(info.getMode());
bld.append(prefix). bld.append(prefix).
append(String.format("set mode to 0%3o", info.getMode())); append(String.format("set mode to ", info.getMode()));
prefix = "; "; prefix = "; ";
} }
if (info.getWeight() != null) { if (info.getWeight() != null) {
pool.setWeight(info.getWeight()); newInfo.setWeight(info.getWeight());
bld.append(prefix). bld.append(prefix).
append("set weight to ").append(info.getWeight()); append("set weight to ").append(info.getWeight());
prefix = "; "; prefix = "; ";
} }
if (prefix.isEmpty()) { if (prefix.isEmpty()) {
bld.append("no changes."); bld.append("no changes.");
} else {
pool.setInfo(newInfo.build());
} }
LOG.info("modified " + poolName + "; " + bld.toString()); // Put the newly modified info back in
cachePoolsById.put(poolId, pool);
cachePoolsByName.put(pool.getInfo().getPoolName(), pool);
LOG.info("modified pool id " + pool.getId()
+ " (" + pool.getInfo().getPoolName() + "); "
+ bld.toString());
} }
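Only the non-null fields of the request are folded into the fresh CachePoolInfo; everything else carries over from the existing pool. A partial-update sketch, assuming poolId is in scope:

CachePoolInfo update = CachePoolInfo.newBuilder()
    .setOwnerName("def")      // change the owner only
    .build();                 // name, group, mode, weight stay null
cacheManager.modifyCachePool(poolId, update);
// The pool keeps its old name, group, mode, and weight; only the owner
// (and, on a rename, the cachePoolsByName key) changes.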
/** /**
@ -294,27 +337,38 @@ final class CacheManager {
* *
* Only the superuser should be able to call this function. * Only the superuser should be able to call this function.
* *
* @param poolName * @param poolId
* The name for the cache pool to remove. * The id of the cache pool to remove.
*/ */
public synchronized void removeCachePool(String poolName) public synchronized void removeCachePool(long poolId) throws IOException {
throws IOException { if (!cachePoolsById.containsKey(poolId)) {
CachePool pool = cachePools.remove(poolName); throw new IOException("can't remove nonexistent cache pool id " + poolId);
if (pool == null) {
throw new IOException("can't remove nonexistent cache pool " + poolName);
} }
// Remove all the entries associated with the pool
Iterator<Map.Entry<Long, PathCacheEntry>> it =
entriesById.entrySet().iterator();
while (it.hasNext()) {
Map.Entry<Long, PathCacheEntry> entry = it.next();
if (entry.getValue().getDirective().getPoolId() == poolId) {
it.remove();
entriesByDirective.remove(entry.getValue().getDirective());
}
}
// Remove the pool
CachePool pool = cachePoolsById.remove(poolId);
cachePoolsByName.remove(pool.getInfo().getPoolName());
} }
public synchronized List<CachePoolInfo> public synchronized List<CachePool> listCachePools(Long prevKey,
listCachePools(FSPermissionChecker pc, String prevKey, int maxRepliesPerRequest) {
int maxRepliesPerRequest) {
final int MAX_PREALLOCATED_REPLIES = 16; final int MAX_PREALLOCATED_REPLIES = 16;
ArrayList<CachePoolInfo> results = ArrayList<CachePool> results =
new ArrayList<CachePoolInfo>(Math.min(MAX_PREALLOCATED_REPLIES, new ArrayList<CachePool>(Math.min(MAX_PREALLOCATED_REPLIES,
maxRepliesPerRequest)); maxRepliesPerRequest));
SortedMap<String, CachePool> tailMap = cachePools.tailMap(prevKey, false); SortedMap<Long, CachePool> tailMap =
for (Entry<String, CachePool> cur : tailMap.entrySet()) { cachePoolsById.tailMap(prevKey, false);
results.add(cur.getValue().getInfo(pc)); for (CachePool pool : tailMap.values()) {
results.add(pool);
} }
return results; return results;
} }
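Pools are now paged by numeric id rather than by name; tailMap(prevKey, false) resumes the scan strictly after the last id seen. A sketch:

long prev = 0;
List<CachePool> pools;
do {
  pools = cacheManager.listCachePools(prev, 16);
  for (CachePool pool : pools) {
    prev = pool.getId();
  }
} while (!pools.isEmpty());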

View File

@ -19,123 +19,119 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException; import java.io.IOException;
import javax.annotation.Nonnull; import org.apache.commons.lang.builder.EqualsBuilder;
import org.apache.commons.lang.builder.HashCodeBuilder;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo.Builder;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
/** /**
* The NameNode uses CachePools to manage cache resources on the DataNodes. * A CachePool describes a set of cache resources being managed by the NameNode.
* User caching requests are billed to the cache pool specified in the request.
*
* CachePools are uniquely identified by a numeric id as well as the
* {@link CachePoolInfo} pool name. Mutable metadata is contained in
* CachePoolInfo, including pool name, owner, group, and permissions.
* See this class for more details.
*/ */
public final class CachePool { public final class CachePool {
public static final Log LOG = LogFactory.getLog(CachePool.class); public static final Log LOG = LogFactory.getLog(CachePool.class);
@Nonnull private final long id;
private final String poolName;
@Nonnull private CachePoolInfo info;
private String ownerName;
@Nonnull public CachePool(long id) {
private String groupName; this.id = id;
this.info = null;
private int mode; }
private int weight; CachePool(long id, String poolName, String ownerName, String groupName,
FsPermission mode, Integer weight) throws IOException {
public static String getCurrentUserPrimaryGroupName() throws IOException { this.id = id;
UserGroupInformation ugi= NameNode.getRemoteUser(); // Set CachePoolInfo default fields if null
String[] groups = ugi.getGroupNames(); if (poolName == null || poolName.isEmpty()) {
if (groups.length == 0) { throw new IOException("invalid empty cache pool name");
throw new IOException("failed to get group names from UGI " + ugi);
} }
return groups[0]; UserGroupInformation ugi = null;
} if (ownerName == null) {
ugi = NameNode.getRemoteUser();
public CachePool(String poolName, String ownerName, String groupName, ownerName = ugi.getShortUserName();
Integer mode, Integer weight) throws IOException { }
this.poolName = poolName; if (groupName == null) {
this.ownerName = ownerName != null ? ownerName : if (ugi == null) {
NameNode.getRemoteUser().getShortUserName(); ugi = NameNode.getRemoteUser();
this.groupName = groupName != null ? groupName : }
getCurrentUserPrimaryGroupName(); String[] groups = ugi.getGroupNames();
this.mode = mode != null ? mode : 0644; if (groups.length == 0) {
this.weight = weight != null ? weight : 100; throw new IOException("failed to get group names from UGI " + ugi);
}
groupName = groups[0];
}
if (mode == null) {
mode = FsPermission.getDirDefault();
}
if (weight == null) {
weight = 100;
}
CachePoolInfo.Builder builder = CachePoolInfo.newBuilder();
builder.setPoolName(poolName).setOwnerName(ownerName)
.setGroupName(groupName).setMode(mode).setWeight(weight);
this.info = builder.build();
} }
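When the optional arguments are null, the constructor fills them in from the calling context. A sketch of the resulting defaults (hypothetical id and users; assumes an RPC context where NameNode.getRemoteUser() resolves to user "alice" with primary group "staff"):

CachePool pool = new CachePool(7, "pool7", null, null, null, null);
CachePoolInfo info = pool.getInfo();
// info.getOwnerName() -> "alice"                      (UGI short user name)
// info.getGroupName() -> "staff"                      (first UGI group)
// info.getMode()      -> FsPermission.getDirDefault() (the directory default)
// info.getWeight()    -> 100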
public String getName() { public CachePool(long id, CachePoolInfo info) {
return poolName; this.id = id;
this.info = info;
} }
public String getOwnerName() { /**
return ownerName; * @return id of the pool
*/
public long getId() {
return id;
} }
public CachePool setOwnerName(String ownerName) {
this.ownerName = ownerName;
return this;
}
public String getGroupName() {
return groupName;
}
public CachePool setGroupName(String groupName) {
this.groupName = groupName;
return this;
}
public int getMode() {
return mode;
}
public CachePool setMode(int mode) {
this.mode = mode;
return this;
}
public int getWeight() {
return weight;
}
public CachePool setWeight(int weight) {
this.weight = weight;
return this;
}
/** /**
* Get information about this cache pool. * Get information about this cache pool.
* *
* @param fullInfo
* If true, only the name will be returned (i.e., what you
* would get if you didn't have read permission for this pool.)
* @return * @return
* Cache pool information. * Cache pool information.
*/ */
public CachePoolInfo getInfo(boolean fullInfo) { public CachePoolInfo getInfo() {
CachePoolInfo info = new CachePoolInfo(poolName); return info;
if (!fullInfo) {
return info;
}
return info.setOwnerName(ownerName).
setGroupName(groupName).
setMode(mode).
setWeight(weight);
} }
public CachePoolInfo getInfo(FSPermissionChecker pc) { void setInfo(CachePoolInfo info) {
return getInfo(pc.checkReadPermission(ownerName, groupName, mode)); this.info = info;
} }
public String toString() { public String toString() {
return new StringBuilder(). return new StringBuilder().
append("{ ").append("poolName:").append(poolName). append("{ ").append("id:").append(id).
append(", ownerName:").append(ownerName). append(", info:").append(info.toString()).
append(", groupName:").append(groupName).
append(", mode:").append(String.format("%3o", mode)).
append(", weight:").append(weight).
append(" }").toString(); append(" }").toString();
} }
@Override
public int hashCode() {
return new HashCodeBuilder().append(id).append(info).hashCode();
}
@Override
public boolean equals(Object obj) {
if (obj == null) { return false; }
if (obj == this) { return true; }
if (obj.getClass() != getClass()) {
return false;
}
CachePool rhs = (CachePool)obj;
return new EqualsBuilder()
.append(id, rhs.id)
.append(info, rhs.info)
.isEquals();
}
} }

View File

@ -6701,7 +6701,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return; // Return previous response return; // Return previous response
} }
boolean success = false; boolean success = false;
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
@ -6749,6 +6748,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
} }
@SuppressWarnings("unchecked")
List<Fallible<PathCacheEntry>> addPathCacheDirectives( List<Fallible<PathCacheEntry>> addPathCacheDirectives(
List<PathCacheDirective> directives) throws IOException { List<PathCacheDirective> directives) throws IOException {
CacheEntryWithPayload retryCacheEntry = CacheEntryWithPayload retryCacheEntry =
@ -6759,7 +6759,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
final FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc = getPermissionChecker();
boolean success = false; boolean success = false;
List<Fallible<PathCacheEntry>> results = null; List<Fallible<PathCacheEntry>> results = null;
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
@ -6767,7 +6766,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
throw new SafeModeException( throw new SafeModeException(
"Cannot add path cache directive", safeMode); "Cannot add path cache directive", safeMode);
} }
results = cacheManager.addDirectives(directives, pc); results = cacheManager.addDirectives(pc, directives);
//getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119 //getEditLog().logAddPathCacheDirectives(results); FIXME: HDFS-5119
success = true; success = true;
} finally { } finally {
@ -6775,7 +6774,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (success) { if (success) {
getEditLog().logSync(); getEditLog().logSync();
} }
if (isAuditEnabled() && isExternalInvocation()) { if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addPathCacheDirectives", null, null, null); logAuditEvent(success, "addPathCacheDirectives", null, null, null);
} }
RetryCache.setState(retryCacheEntry, success, results); RetryCache.setState(retryCacheEntry, success, results);
@ -6783,58 +6782,50 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return results; return results;
} }
List<Fallible<Long>> removePathCacheEntries(List<Long> ids) throws IOException { @SuppressWarnings("unchecked")
CacheEntryWithPayload retryCacheEntry = List<Fallible<Long>> removePathCacheEntries(List<Long> ids)
RetryCache.waitForCompletion(retryCache, null); throws IOException {
if (retryCacheEntry != null && retryCacheEntry.isSuccess()) {
return (List<Fallible<Long>>) retryCacheEntry.getPayload();
}
final FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc = getPermissionChecker();
boolean success = false; boolean success = false;
List<Fallible<Long>> results = null; List<Fallible<Long>> results = null;
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (isInSafeMode()) { if (isInSafeMode()) {
throw new SafeModeException( throw new SafeModeException(
"Cannot remove path cache directives", safeMode); "Cannot add path cache directive", safeMode);
} }
results = cacheManager.removeEntries(ids, pc); results = cacheManager.removeEntries(pc, ids);
//getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119 //getEditLog().logRemovePathCacheEntries(results); FIXME: HDFS-5119
success = true; success = true;
} finally { } finally {
writeUnlock(); writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) { if (success) {
getEditLog().logSync();
}
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(success, "removePathCacheEntries", null, null, null); logAuditEvent(success, "removePathCacheEntries", null, null, null);
} }
RetryCache.setState(retryCacheEntry, success, results);
} }
getEditLog().logSync();
return results; return results;
} }
List<PathCacheEntry> listPathCacheEntries(long startId, String pool, List<PathCacheEntry> listPathCacheEntries(long startId,
int maxReplies) throws IOException { Long poolId, int maxReplies) throws IOException {
checkOperation(OperationCategory.READ); LOG.info("listPathCacheEntries with " + startId + " " + poolId);
readLock(); final FSPermissionChecker pc = getPermissionChecker();
try { return cacheManager.listPathCacheEntries(pc, startId, poolId, maxReplies);
checkOperation(OperationCategory.READ);
return cacheManager.listPathCacheEntries(startId, pool, maxReplies);
} finally {
readUnlock();
}
} }
public void addCachePool(CachePoolInfo req) throws IOException { public CachePool addCachePool(CachePoolInfo req) throws IOException {
final FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc = getPermissionChecker();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); CacheEntryWithPayload cacheEntry =
RetryCache.waitForCompletion(retryCache, null);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response return (CachePool)cacheEntry.getPayload(); // Return previous response
} }
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
boolean success = false; CachePool pool = null;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (!pc.isSuperUser()) { if (!pc.isSuperUser()) {
@ -6845,29 +6836,28 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
throw new SafeModeException( throw new SafeModeException(
"Cannot add cache pool " + req.getPoolName(), safeMode); "Cannot add cache pool " + req.getPoolName(), safeMode);
} }
cacheManager.addCachePool(req); pool = cacheManager.addCachePool(req);
RetryCache.setState(cacheEntry, true);
//getEditLog().logAddCachePool(req); // FIXME: HDFS-5119 //getEditLog().logAddCachePool(req); // FIXME: HDFS-5119
success = true;
} finally { } finally {
writeUnlock(); writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "addCachePool", req.getPoolName(), null, null);
}
RetryCache.setState(cacheEntry, success);
} }
getEditLog().logSync(); getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "addCachePool", req.getPoolName(), null, null);
}
return pool;
} }
public void modifyCachePool(CachePoolInfo req) throws IOException { public void modifyCachePool(long poolId, CachePoolInfo info)
throws IOException {
final FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc = getPermissionChecker();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache); CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) { if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response return; // Return previous response
} }
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
boolean success = false;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (!pc.isSuperUser()) { if (!pc.isSuperUser()) {
@ -6876,64 +6866,62 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
} }
if (isInSafeMode()) { if (isInSafeMode()) {
throw new SafeModeException( throw new SafeModeException(
"Cannot modify cache pool " + req.getPoolName(), safeMode); "Cannot modify cache pool " + info.getPoolName(), safeMode);
} }
cacheManager.modifyCachePool(req); cacheManager.modifyCachePool(poolId, info);
RetryCache.setState(cacheEntry, true);
//getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119 //getEditLog().logModifyCachePool(req); // FIXME: HDFS-5119
success = true;
} finally { } finally {
writeUnlock(); writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "modifyCachePool", req.getPoolName(), null, null);
}
RetryCache.setState(cacheEntry, success);
} }
getEditLog().logSync(); getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "modifyCachePool", info.getPoolName(), null, null);
}
} }
public void removeCachePool(String cachePoolName) throws IOException { public void removeCachePool(long poolId) throws IOException {
final FSPermissionChecker pc = getPermissionChecker(); final FSPermissionChecker pc = getPermissionChecker();
CacheEntry cacheEntry = RetryCache.waitForCompletion(retryCache);
if (cacheEntry != null && cacheEntry.isSuccess()) {
return; // Return previous response
}
checkOperation(OperationCategory.WRITE);
writeLock(); writeLock();
boolean success = false; CachePool pool;
try { try {
checkOperation(OperationCategory.WRITE); checkOperation(OperationCategory.WRITE);
if (!pc.isSuperUser()) { if (!pc.isSuperUser()) {
throw new AccessControlException("Non-super users cannot " + throw new AccessControlException("Non-super users cannot " +
"remove cache pools."); "remove cache pools.");
} }
pool = cacheManager.getCachePool(poolId);
if (isInSafeMode()) { if (isInSafeMode()) {
String identifier;
if (pool == null) {
identifier = "with id " + Long.toString(poolId);
} else {
identifier = pool.getInfo().getPoolName();
}
throw new SafeModeException( throw new SafeModeException(
"Cannot remove cache pool " + cachePoolName, safeMode); "Cannot remove cache pool " + identifier, safeMode);
} }
cacheManager.removeCachePool(cachePoolName); cacheManager.removeCachePool(poolId);
//getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119 //getEditLog().logRemoveCachePool(req); // FIXME: HDFS-5119
success = true;
} finally { } finally {
writeUnlock(); writeUnlock();
if (isAuditEnabled() && isExternalInvocation()) {
logAuditEvent(success, "removeCachePool", cachePoolName, null, null);
}
RetryCache.setState(cacheEntry, success);
} }
getEditLog().logSync(); getEditLog().logSync();
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
logAuditEvent(true, "removeCachePool", pool.getInfo().getPoolName(),
null, null);
}
} }
public List<CachePoolInfo> listCachePools(String prevKey, public List<CachePool> listCachePools(long prevKey,
int maxRepliesPerRequest) throws IOException { int maxRepliesPerRequest) throws IOException {
final FSPermissionChecker pc = getPermissionChecker(); List<CachePool> results;
List<CachePoolInfo> results;
checkOperation(OperationCategory.READ);
readLock(); readLock();
try { try {
checkOperation(OperationCategory.READ); checkOperation(OperationCategory.READ);
results = cacheManager.listCachePools(pc, prevKey, maxRepliesPerRequest); results = cacheManager.listCachePools(prevKey, maxRepliesPerRequest);
} finally { } finally {
readUnlock(); readUnlock();
} }

View File

@ -17,7 +17,6 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Arrays; import java.util.Arrays;
import java.util.Collections; import java.util.Collections;
import java.util.HashSet; import java.util.HashSet;
@ -29,6 +28,7 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction; import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation;
@ -257,38 +257,29 @@ class FSPermissionChecker {
} }
/** /**
* Check if this CachePool can be accessed. * Whether a cache pool can be accessed by the current context
* *
* @param pc * @param pool CachePool being accessed
* Permission checker object with user name and groups. * @param access type of action being performed on the cache pool
* @param write * @return true if the pool can be accessed with the given action
* True if we care about write access; false otherwise.
* @return
* True only if the cache pool is accessible.
*/ */
private boolean checkPermission(String userName, public boolean checkPermission(CachePool pool, FsAction access) {
String groupName, int mode, int mask) { CachePoolInfo info = pool.getInfo();
if ((mode & mask) != 0) { FsPermission mode = info.getMode();
if (isSuperUser()) {
return true; return true;
} }
if (((mode & (mask << 6)) != 0) if (user.equals(info.getOwnerName())
&& (getUser().equals(userName))) { && mode.getUserAction().implies(access)) {
return true; return true;
} }
if (((mode & (mask << 6)) != 0) if (groups.contains(info.getGroupName())
&& (containsGroup(groupName))) { && mode.getGroupAction().implies(access)) {
return true;
}
if (mode.getOtherAction().implies(access)) {
return true; return true;
} }
return false; return false;
} }
public boolean checkWritePermission(String userName,
String groupName, int mode) {
return checkPermission(userName, groupName, mode, 02);
}
public boolean checkReadPermission(String userName,
String groupName, int mode) {
return checkPermission(userName, groupName, mode, 04);
}
} }
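The rewritten check maps pool modes onto standard FsAction semantics: a superuser always passes, then the owner, group, and other bits are consulted in turn. A worked sketch for a pool with mode 0755:

FsPermission mode = new FsPermission((short) 0755);
mode.getUserAction().implies(FsAction.WRITE);    // true:  the owner may write
mode.getGroupAction().implies(FsAction.WRITE);   // false: the group is r-x
mode.getOtherAction().implies(FsAction.EXECUTE); // true:  anyone may list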

View File

@ -31,13 +31,11 @@ import java.util.Arrays;
import java.util.Collection; import java.util.Collection;
import java.util.HashMap; import java.util.HashMap;
import java.util.List; import java.util.List;
import java.util.NoSuchElementException;
import org.apache.commons.logging.Log; import org.apache.commons.logging.Log;
import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BatchedRemoteIterator; import org.apache.hadoop.fs.BatchedRemoteIterator;
import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
import org.apache.hadoop.fs.CommonConfigurationKeys; import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.fs.ContentSummary; import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.CreateFlag;
@ -62,9 +60,9 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider; import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException; import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs; import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks; import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@ -1225,72 +1223,73 @@ class NameNodeRpcServer implements NamenodeProtocols {
private class ServerSidePathCacheEntriesIterator private class ServerSidePathCacheEntriesIterator
extends BatchedRemoteIterator<Long, PathCacheEntry> { extends BatchedRemoteIterator<Long, PathCacheEntry> {
private final String pool; private final Long poolId;
public ServerSidePathCacheEntriesIterator(Long firstKey, public ServerSidePathCacheEntriesIterator(Long firstKey,
int maxRepliesPerRequest, String pool) { int maxRepliesPerRequest, Long poolId) {
super(firstKey, maxRepliesPerRequest); super(firstKey, maxRepliesPerRequest);
this.pool = pool; this.poolId = poolId;
} }
@Override @Override
public BatchedEntries<PathCacheEntry> makeRequest( public BatchedEntries<PathCacheEntry> makeRequest(
Long nextKey, int maxRepliesPerRequest) throws IOException { Long prevKey, int maxRepliesPerRequest) throws IOException {
return new BatchedListEntries<PathCacheEntry>( return new BatchedListEntries<PathCacheEntry>(
namesystem.listPathCacheEntries(nextKey, pool, namesystem.listPathCacheEntries(prevKey, poolId,
maxRepliesPerRequest)); maxRepliesPerRequest));
} }
@Override @Override
public Long elementToNextKey(PathCacheEntry entry) { public Long elementToPrevKey(PathCacheEntry entry) {
return entry.getEntryId(); return entry.getEntryId();
} }
} }
@Override @Override
public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId, String pool, public RemoteIterator<PathCacheEntry> listPathCacheEntries(long prevId,
int maxReplies) throws IOException { long poolId, int maxReplies) throws IOException {
return new ServerSidePathCacheEntriesIterator(prevId, maxReplies, pool); return new ServerSidePathCacheEntriesIterator(prevId, maxReplies, poolId);
} }
@Override @Override
public void addCachePool(CachePoolInfo info) throws IOException { public CachePool addCachePool(CachePoolInfo info) throws IOException {
namesystem.addCachePool(info); return namesystem.addCachePool(info);
} }
@Override @Override
public void modifyCachePool(CachePoolInfo info) throws IOException { public void modifyCachePool(long poolId, CachePoolInfo info)
namesystem.modifyCachePool(info); throws IOException {
namesystem.modifyCachePool(poolId, info);
} }
@Override @Override
public void removeCachePool(String cachePoolName) throws IOException { public void removeCachePool(long poolId) throws IOException {
namesystem.removeCachePool(cachePoolName); namesystem.removeCachePool(poolId);
} }
private class ServerSideCachePoolIterator private class ServerSideCachePoolIterator
extends BatchedRemoteIterator<String, CachePoolInfo> { extends BatchedRemoteIterator<Long, CachePool> {
public ServerSideCachePoolIterator(String prevKey, int maxRepliesPerRequest) { public ServerSideCachePoolIterator(long prevId, int maxRepliesPerRequest) {
super(prevKey, maxRepliesPerRequest); super(prevId, maxRepliesPerRequest);
} }
@Override @Override
public BatchedEntries<CachePoolInfo> makeRequest(String prevKey, public BatchedEntries<CachePool> makeRequest(Long prevId,
int maxRepliesPerRequest) throws IOException { int maxRepliesPerRequest) throws IOException {
return new BatchedListEntries<CachePoolInfo>( return new BatchedListEntries<CachePool>(
namesystem.listCachePools(prevKey, maxRepliesPerRequest)); namesystem.listCachePools(prevId, maxRepliesPerRequest));
} }
@Override @Override
public String elementToNextKey(CachePoolInfo element) { public Long elementToPrevKey(CachePool element) {
return element.getPoolName(); return element.getId();
} }
} }
@Override @Override
public RemoteIterator<CachePoolInfo> listCachePools(String prevKey, public RemoteIterator<CachePool> listCachePools(long prevPoolId,
int maxRepliesPerRequest) throws IOException { int maxRepliesPerRequest) throws IOException {
return new ServerSideCachePoolIterator(prevKey, maxRepliesPerRequest); return new ServerSideCachePoolIterator(prevPoolId, maxRepliesPerRequest);
} }
} }
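On the client side, BatchedRemoteIterator hides the batching entirely. A consumption sketch, assuming namenode is a NamenodeProtocols proxy:

RemoteIterator<CachePool> it = namenode.listCachePools(0, 16);
while (it.hasNext()) {
  CachePool pool = it.next();
  System.out.println(pool.getId() + "\t" + pool.getInfo().getPoolName());
}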

View File

@ -363,9 +363,27 @@ message IsFileClosedResponseProto {
required bool result = 1; required bool result = 1;
} }
message CachePoolInfoProto {
optional string poolName = 1;
optional string ownerName = 2;
optional string groupName = 3;
optional int32 mode = 4;
optional int32 weight = 5;
}
message CachePoolProto {
optional int64 id = 1;
optional CachePoolInfoProto info = 2;
}
message PathCacheDirectiveProto { message PathCacheDirectiveProto {
required string path = 1; required string path = 1;
required string pool = 2; required CachePoolProto pool = 2;
}
message PathCacheEntryProto {
required int64 id = 1;
optional PathCacheDirectiveProto directive = 2;
} }
message AddPathCacheDirectivesRequestProto { message AddPathCacheDirectivesRequestProto {
@ -399,53 +417,41 @@ enum RemovePathCacheEntryErrorProto {
} }
message ListPathCacheEntriesRequestProto { message ListPathCacheEntriesRequestProto {
required int64 prevId = 1; required PathCacheEntryProto prevEntry = 1;
required string pool = 2; required CachePoolProto pool = 2;
optional int32 maxReplies = 3; optional int32 maxReplies = 3;
} }
message ListPathCacheEntriesElementProto {
required int64 id = 1;
required string path = 2;
required string pool = 3;
}
message ListPathCacheEntriesResponseProto { message ListPathCacheEntriesResponseProto {
repeated ListPathCacheEntriesElementProto elements = 1; repeated PathCacheEntryProto entries = 1;
required bool hasMore = 2; required bool hasMore = 2;
} }
message AddCachePoolRequestProto { message AddCachePoolRequestProto {
required string poolName = 1; required CachePoolInfoProto info = 1;
optional string ownerName = 2;
optional string groupName = 3;
optional int32 mode = 4;
optional int32 weight = 5;
} }
message AddCachePoolResponseProto { // void response message AddCachePoolResponseProto {
required CachePoolProto pool = 1;
} }
message ModifyCachePoolRequestProto { message ModifyCachePoolRequestProto {
required string poolName = 1; required CachePoolProto pool = 1;
optional string ownerName = 2; required CachePoolInfoProto info = 2;
optional string groupName = 3;
optional int32 mode = 4;
optional int32 weight = 5;
} }
message ModifyCachePoolResponseProto { // void response message ModifyCachePoolResponseProto { // void response
} }
message RemoveCachePoolRequestProto { message RemoveCachePoolRequestProto {
required string poolName = 1; required CachePoolProto pool = 1;
} }
message RemoveCachePoolResponseProto { // void response message RemoveCachePoolResponseProto { // void response
} }
message ListCachePoolsRequestProto { message ListCachePoolsRequestProto {
required string prevPoolName = 1; required CachePoolProto prevPool = 1;
required int32 maxReplies = 2; required int32 maxReplies = 2;
} }
@ -455,11 +461,7 @@ message ListCachePoolsResponseProto {
} }
message ListCachePoolsResponseElementProto { message ListCachePoolsResponseElementProto {
required string poolName = 1; required CachePoolProto pool = 1;
required string ownerName = 2;
required string groupName = 3;
required int32 mode = 4;
required int32 weight = 5;
} }
message GetFileLinkInfoRequestProto { message GetFileLinkInfoRequestProto {

View File

@ -92,6 +92,9 @@ public class TestFsDatasetCache {
@After @After
public void tearDown() throws Exception { public void tearDown() throws Exception {
if (fs != null) {
fs.close();
}
if (cluster != null) { if (cluster != null) {
cluster.shutdown(); cluster.shutdown();
} }
@ -159,13 +162,11 @@ public class TestFsDatasetCache {
} }
/** /**
* Blocks until cache usage changes from the current value, then verifies * Blocks until cache usage hits the expected new value.
* against the expected new value.
*/ */
private long verifyExpectedCacheUsage(final long current, private long verifyExpectedCacheUsage(final long expected) throws Exception {
final long expected) throws Exception {
long cacheUsed = fsd.getCacheUsed(); long cacheUsed = fsd.getCacheUsed();
while (cacheUsed == current) { while (cacheUsed != expected) {
cacheUsed = fsd.getCacheUsed(); cacheUsed = fsd.getCacheUsed();
Thread.sleep(100); Thread.sleep(100);
} }
@ -202,13 +203,13 @@ public class TestFsDatasetCache {
// Cache each block in succession, checking each time // Cache each block in succession, checking each time
for (int i=0; i<NUM_BLOCKS; i++) { for (int i=0; i<NUM_BLOCKS; i++) {
setHeartbeatResponse(cacheBlock(locs[i])); setHeartbeatResponse(cacheBlock(locs[i]));
current = verifyExpectedCacheUsage(current, current + blockSizes[i]); current = verifyExpectedCacheUsage(current + blockSizes[i]);
} }
// Uncache each block in succession, again checking each time // Uncache each block in succession, again checking each time
for (int i=0; i<NUM_BLOCKS; i++) { for (int i=0; i<NUM_BLOCKS; i++) {
setHeartbeatResponse(uncacheBlock(locs[i])); setHeartbeatResponse(uncacheBlock(locs[i]));
current = verifyExpectedCacheUsage(current, current - blockSizes[i]); current = verifyExpectedCacheUsage(current - blockSizes[i]);
} }
} }
@ -237,7 +238,7 @@ public class TestFsDatasetCache {
long current = 0; long current = 0;
for (int i=0; i<numFiles-1; i++) { for (int i=0; i<numFiles-1; i++) {
setHeartbeatResponse(cacheBlocks(fileLocs[i])); setHeartbeatResponse(cacheBlocks(fileLocs[i]));
current = verifyExpectedCacheUsage(current, current + fileSizes[i]); current = verifyExpectedCacheUsage(current + fileSizes[i]);
} }
final long oldCurrent = current; final long oldCurrent = current;
@ -262,7 +263,7 @@ public class TestFsDatasetCache {
// Uncache the n-1 files // Uncache the n-1 files
for (int i=0; i<numFiles-1; i++) { for (int i=0; i<numFiles-1; i++) {
setHeartbeatResponse(uncacheBlocks(fileLocs[i])); setHeartbeatResponse(uncacheBlocks(fileLocs[i]));
current = verifyExpectedCacheUsage(current, current - fileSizes[i]); current = verifyExpectedCacheUsage(current - fileSizes[i]);
} }
} }
} }

View File

@ -17,9 +17,10 @@
*/ */
package org.apache.hadoop.hdfs.server.namenode; package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.*; import static org.junit.Assert.assertEquals;
import java.io.IOException; import java.io.IOException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays; import java.util.Arrays;
import java.util.List; import java.util.List;
@ -29,53 +30,65 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.EmptyPathError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPathNameError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.InvalidPoolError;
import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError; import org.apache.hadoop.hdfs.protocol.AddPathCacheDirectiveException.PoolWritePermissionDeniedError;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo; import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.PathCacheDirective; import org.apache.hadoop.hdfs.protocol.PathCacheDirective;
import org.apache.hadoop.hdfs.protocol.PathCacheEntry; import org.apache.hadoop.hdfs.protocol.PathCacheEntry;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.InvalidIdException;
import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException; import org.apache.hadoop.hdfs.protocol.RemovePathCacheEntryException.NoSuchIdException;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Fallible; import org.apache.hadoop.util.Fallible;
import org.junit.After;
import org.junit.Before;
import org.junit.Test; import org.junit.Test;
public class TestPathCacheRequests { public class TestPathCacheRequests {
static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class); static final Log LOG = LogFactory.getLog(TestPathCacheRequests.class);
@Test private static Configuration conf = new HdfsConfiguration();
public void testCreateAndRemovePools() throws Exception { private static MiniDFSCluster cluster = null;
Configuration conf = new HdfsConfiguration(); private static NamenodeProtocols proto = null;
MiniDFSCluster cluster = null;
@Before
public void setUp() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
cluster.waitActive(); cluster.waitActive();
NamenodeProtocols proto = cluster.getNameNodeRpc(); proto = cluster.getNameNodeRpc();
CachePoolInfo req = new CachePoolInfo("pool1"). }
setOwnerName("bob").setGroupName("bobgroup").
setMode(0755).setWeight(150); @After
proto.addCachePool(req); public void tearDown() throws Exception {
try { if (cluster != null) {
proto.removeCachePool("pool99"); cluster.shutdown();
Assert.fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("can't remove " +
"nonexistent cache pool", ioe);
} }
proto.removeCachePool("pool1"); }
@Test
public void testCreateAndRemovePools() throws Exception {
CachePoolInfo req =
CachePoolInfo.newBuilder().setPoolName("pool1").setOwnerName("bob")
.setGroupName("bobgroup").setMode(new FsPermission((short) 0755))
.setWeight(150).build();
CachePool pool = proto.addCachePool(req);
try { try {
proto.removeCachePool("pool1"); proto.removeCachePool(909);
Assert.fail("expected to get an exception when " +
"removing a non-existent pool.");
} catch (IOException ioe) {
}
proto.removeCachePool(pool.getId());
try {
proto.removeCachePool(pool.getId());
Assert.fail("expected to get an exception when " + Assert.fail("expected to get an exception when " +
"removing a non-existent pool."); "removing a non-existent pool.");
} catch (IOException ioe) { } catch (IOException ioe) {
GenericTestUtils.assertExceptionContains("can't remove " +
"nonexistent cache pool", ioe);
} }
req = new CachePoolInfo("pool2"); req = new CachePoolInfo("pool2");
proto.addCachePool(req); proto.addCachePool(req);
@@ -83,34 +96,42 @@ public class TestPathCacheRequests {
   @Test
   public void testCreateAndModifyPools() throws Exception {
-    Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = null;
-    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-    cluster.waitActive();
-    NamenodeProtocols proto = cluster.getNameNodeRpc();
-    proto.addCachePool(new CachePoolInfo("pool1").
-        setOwnerName("abc").setGroupName("123").
-        setMode(0755).setWeight(150));
-    proto.modifyCachePool(new CachePoolInfo("pool1").
-        setOwnerName("def").setGroupName("456"));
-    RemoteIterator<CachePoolInfo> iter = proto.listCachePools("", 1);
-    CachePoolInfo info = iter.next();
-    assertEquals("pool1", info.getPoolName());
-    assertEquals("def", info.getOwnerName());
-    assertEquals("456", info.getGroupName());
-    assertEquals(Integer.valueOf(0755), info.getMode());
-    assertEquals(Integer.valueOf(150), info.getWeight());
+    // Create a new pool
+    CachePoolInfo info = CachePoolInfo.newBuilder().
+        setPoolName("pool1").
+        setOwnerName("abc").
+        setGroupName("123").
+        setMode(new FsPermission((short)0755)).
+        setWeight(150).
+        build();
+    CachePool pool = proto.addCachePool(info);
+    CachePoolInfo actualInfo = pool.getInfo();
+    assertEquals("Expected info to match create time settings",
+        info, actualInfo);
+    // Modify the pool
+    info = CachePoolInfo.newBuilder().
+        setPoolName("pool2").
+        setOwnerName("def").
+        setGroupName("456").
+        setMode(new FsPermission((short)0644)).
+        setWeight(200).
+        build();
+    proto.modifyCachePool(pool.getId(), info);
+    // Check via listing this time
+    RemoteIterator<CachePool> iter = proto.listCachePools(0, 1);
+    CachePool listedPool = iter.next();
+    actualInfo = listedPool.getInfo();
+    assertEquals("Expected info to match modified settings", info, actualInfo);
     try {
-      proto.removeCachePool("pool99");
+      proto.removeCachePool(808);
       Assert.fail("expected to get an exception when " +
           "removing a non-existent pool.");
     } catch (IOException ioe) {
     }
-    proto.removeCachePool("pool1");
+    proto.removeCachePool(pool.getId());
     try {
-      proto.removeCachePool("pool1");
+      proto.removeCachePool(pool.getId());
       Assert.fail("expected to get an exception when " +
           "removing a non-existent pool.");
     } catch (IOException ioe) {
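Note: the two assertEquals(...) calls on CachePoolInfo compare whole objects,
which only works if CachePoolInfo overrides equals() and hashCode(). A minimal
sketch of such an override, assuming field names implied by the builder calls
above (poolName, ownerName, groupName, mode, weight) and Guava's
com.google.common.base.Objects, might look like:

  @Override
  public boolean equals(Object o) {
    if (this == o) { return true; }
    if (!(o instanceof CachePoolInfo)) { return false; }
    CachePoolInfo other = (CachePoolInfo)o;
    // null-safe comparison of each pool attribute
    return Objects.equal(poolName, other.poolName)
        && Objects.equal(ownerName, other.ownerName)
        && Objects.equal(groupName, other.groupName)
        && Objects.equal(mode, other.mode)
        && Objects.equal(weight, other.weight);
  }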
@@ -121,13 +142,13 @@ public class TestPathCacheRequests {
       RemoteIterator<PathCacheEntry> iter,
       long id0, long id1, long id2) throws Exception {
     Assert.assertEquals(new PathCacheEntry(id0,
-        new PathCacheDirective("/alpha", "pool1")),
+        new PathCacheDirective("/alpha", 1)),
         iter.next());
     Assert.assertEquals(new PathCacheEntry(id1,
-        new PathCacheDirective("/beta", "pool2")),
+        new PathCacheDirective("/beta", 2)),
         iter.next());
     Assert.assertEquals(new PathCacheEntry(id2,
-        new PathCacheDirective("/gamma", "pool1")),
+        new PathCacheDirective("/gamma", 1)),
         iter.next());
     Assert.assertFalse(iter.hasNext());
   }
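Note: validateListAll walks the RemoteIterator entry by entry, so a mismatch
fails on the first bad element. Where the full listing would make a better
failure message, a hypothetical drain helper (not in this patch; it assumes
only the hasNext()/next() calls already exercised here, plus java.util
imports) could collect it first:

  private static List<PathCacheEntry> drain(
      RemoteIterator<PathCacheEntry> iter) throws IOException {
    List<PathCacheEntry> entries = new ArrayList<PathCacheEntry>();
    while (iter.hasNext()) {
      entries.add(iter.next());   // each next() may fetch another batch
    }
    return entries;
  }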
@@ -140,23 +161,36 @@ public class TestPathCacheRequests {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
       cluster.waitActive();
-      NamenodeProtocols proto = cluster.getNameNodeRpc();
-      proto.addCachePool(new CachePoolInfo("pool1"));
-      proto.addCachePool(new CachePoolInfo("pool2"));
-      proto.addCachePool(new CachePoolInfo("pool3"));
-      proto.addCachePool(new CachePoolInfo("pool4").setMode(0));
-      List<Fallible<PathCacheEntry>> addResults1 =
-          proto.addPathCacheDirectives(Arrays.asList(
-            new PathCacheDirective[] {
-              new PathCacheDirective("/alpha", "pool1"),
-              new PathCacheDirective("/beta", "pool2"),
-              new PathCacheDirective("", "pool3"),
-              new PathCacheDirective("/zeta", "nonexistent_pool"),
-              new PathCacheDirective("/zeta", "pool4")
-          }));
+      final CachePool pool1 = proto.addCachePool(new CachePoolInfo("pool1"));
+      final CachePool pool2 = proto.addCachePool(new CachePoolInfo("pool2"));
+      final CachePool pool3 = proto.addCachePool(new CachePoolInfo("pool3"));
+      final CachePool pool4 = proto.addCachePool(CachePoolInfo.newBuilder()
+          .setPoolName("pool4")
+          .setMode(new FsPermission((short)0)).build());
+      UserGroupInformation testUgi = UserGroupInformation
+          .createUserForTesting("myuser", new String[]{"mygroup"});
+      List<Fallible<PathCacheEntry>> addResults1 = testUgi.doAs(
+          new PrivilegedExceptionAction<List<Fallible<PathCacheEntry>>>() {
+            @Override
+            public List<Fallible<PathCacheEntry>> run() throws IOException {
+              List<Fallible<PathCacheEntry>> entries;
+              entries = proto.addPathCacheDirectives(
+                  Arrays.asList(new PathCacheDirective[] {
+                      new PathCacheDirective("/alpha", pool1.getId()),
+                      new PathCacheDirective("/beta", pool2.getId()),
+                      new PathCacheDirective("", pool3.getId()),
+                      new PathCacheDirective("/zeta", 404),
+                      new PathCacheDirective("/zeta", pool4.getId())
+                  }));
+              return entries;
+            }
+          });
+      // Save the successful additions
       long ids1[] = new long[2];
-      ids1[0] = addResults1.get(0).get().getEntryId();
-      ids1[1] = addResults1.get(1).get().getEntryId();
+      for (int i=0; i<2; i++) {
+        ids1[i] = addResults1.get(i).get().getEntryId();
+      }
+      // Verify that the unsuccessful additions failed properly
       try {
         addResults1.get(2).get();
         Assert.fail("expected an error when adding an empty path");
@@ -167,7 +201,7 @@ public class TestPathCacheRequests {
         addResults1.get(3).get();
         Assert.fail("expected an error when adding to a nonexistent pool.");
       } catch (IOException ioe) {
-        Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError);
+        Assert.assertTrue(ioe.getCause() instanceof InvalidPoolError);
       }
       try {
         addResults1.get(4).get();
@@ -181,10 +215,10 @@ public class TestPathCacheRequests {
       List<Fallible<PathCacheEntry>> addResults2 =
           proto.addPathCacheDirectives(Arrays.asList(
             new PathCacheDirective[] {
-              new PathCacheDirective("/alpha", "pool1"),
-              new PathCacheDirective("/theta", ""),
-              new PathCacheDirective("bogus", "pool1"),
-              new PathCacheDirective("/gamma", "pool1")
+              new PathCacheDirective("/alpha", pool1.getId()),
+              new PathCacheDirective("/theta", 404),
+              new PathCacheDirective("bogus", pool1.getId()),
+              new PathCacheDirective("/gamma", pool1.getId())
           }));
       long id = addResults2.get(0).get().getEntryId();
       Assert.assertEquals("expected to get back the same ID as last time " +
@@ -194,7 +228,7 @@ public class TestPathCacheRequests {
         Assert.fail("expected an error when adding a path cache " +
             "directive with an empty pool name.");
       } catch (IOException ioe) {
-        Assert.assertTrue(ioe.getCause() instanceof InvalidPoolNameError);
+        Assert.assertTrue(ioe.getCause() instanceof InvalidPoolError);
       }
       try {
         addResults2.get(2).get();
@@ -206,14 +240,16 @@ public class TestPathCacheRequests {
       long ids2[] = new long[1];
       ids2[0] = addResults2.get(3).get().getEntryId();
+      // Validate listing all entries
       RemoteIterator<PathCacheEntry> iter =
-          proto.listPathCacheEntries(0, "", 100);
+          proto.listPathCacheEntries(-1l, -1l, 100);
       validateListAll(iter, ids1[0], ids1[1], ids2[0]);
-      iter = proto.listPathCacheEntries(0, "", 1);
+      iter = proto.listPathCacheEntries(-1l, -1l, 1);
       validateListAll(iter, ids1[0], ids1[1], ids2[0]);
-      iter = proto.listPathCacheEntries(0, "pool3", 1);
+      // Validate listing certain pools
+      iter = proto.listPathCacheEntries(0, pool3.getId(), 1);
       Assert.assertFalse(iter.hasNext());
-      iter = proto.listPathCacheEntries(0, "pool2", 4444);
+      iter = proto.listPathCacheEntries(0, pool2.getId(), 4444);
       Assert.assertEquals(addResults1.get(1).get(),
           iter.next());
       Assert.assertFalse(iter.hasNext());
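Note: listPathCacheEntries now takes numeric (prevId, poolId, maxReplies)
arguments: entries after prevId come back in batches of at most maxReplies,
and poolId restricts the listing to one pool. The -1l, -1l calls above appear
to mean "from the beginning" and "any pool"; that reading is an inference from
this test, not a documented contract. Batching stays hidden behind the
iterator, so a caller just loops:

  RemoteIterator<PathCacheEntry> iter =
      proto.listPathCacheEntries(-1l, -1l, 100);
  while (iter.hasNext()) {
    PathCacheEntry entry = iter.next();  // may fetch the next batch over RPC
    System.out.println("cached: " + entry);
  }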
@@ -235,7 +271,7 @@ public class TestPathCacheRequests {
       } catch (IOException ioe) {
         Assert.assertTrue(ioe.getCause() instanceof NoSuchIdException);
       }
-      iter = proto.listPathCacheEntries(0, "pool2", 4444);
+      iter = proto.listPathCacheEntries(0, pool2.getId(), 4444);
       Assert.assertFalse(iter.hasNext());
     } finally {
       if (cluster != null) { cluster.shutdown(); }