From f791e291ca39eac6aa0650319e8dd606d15d5804 Mon Sep 17 00:00:00 2001
From: Colin McCabe
Date: Wed, 4 Dec 2013 20:06:25 +0000
Subject: [PATCH] HDFS-5555. CacheAdmin commands fail when first listed
 NameNode is in Standby (jxiang via cmccabe)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1547895 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +
 .../org/apache/hadoop/hdfs/DFSClient.java     | 16 +---
 .../hdfs/protocol/CacheDirectiveIterator.java | 56 +++++++++++
 .../hdfs/protocol/CachePoolIterator.java      | 53 +++++++++++
 .../hadoop/hdfs/protocol/ClientProtocol.java  | 10 +-
 ...amenodeProtocolServerSideTranslatorPB.java | 45 ++-------
 .../ClientNamenodeProtocolTranslatorPB.java   | 84 ++++-------------
 .../server/namenode/NameNodeRpcServer.java    | 53 +----------
 .../server/namenode/TestCacheDirectives.java  |  3 +-
 .../namenode/ha/TestRetryCacheWithHA.java     | 92 +++++++++++++++++++
 10 files changed, 250 insertions(+), 165 deletions(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4ac7567a988..c43ef6e6d31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -421,6 +421,9 @@ Trunk (Unreleased)
     HDFS-5562. TestCacheDirectives and TestFsDatasetCache should stub out
     native mlock. (Colin McCabe and Akira Ajisaka via wang)
 
+    HDFS-5555. CacheAdmin commands fail when first listed NameNode is in
+    Standby (jxiang via cmccabe)
+
 Release 2.4.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 11cdb4f26d9..f008878e48e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -109,8 +109,10 @@ import org.apache.hadoop.hdfs.client.ClientMmapManager;
 import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
+import org.apache.hadoop.hdfs.protocol.CachePoolIterator;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -2324,12 +2326,7 @@ public class DFSClient implements java.io.Closeable {
 
   public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
       CacheDirectiveInfo filter) throws IOException {
-    checkOpen();
-    try {
-      return namenode.listCacheDirectives(0, filter);
-    } catch (RemoteException re) {
-      throw re.unwrapRemoteException();
-    }
+    return new CacheDirectiveIterator(namenode, filter);
   }
 
   public void addCachePool(CachePoolInfo info) throws IOException {
@@ -2360,12 +2357,7 @@ public class DFSClient implements java.io.Closeable {
   }
 
   public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
-    checkOpen();
-    try {
-      return namenode.listCachePools("");
-    } catch (RemoteException re) {
-      throw re.unwrapRemoteException();
-    }
+    return new CachePoolIterator(namenode);
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
new file mode 100644
index 00000000000..773a2841321
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveIterator.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BatchedRemoteIterator;
+
+/**
+ * CacheDirectiveIterator is a remote iterator that iterates cache directives.
+ * It supports retrying in case of namenode failover.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CacheDirectiveIterator
+    extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
+
+  private final CacheDirectiveInfo filter;
+  private final ClientProtocol namenode;
+
+  public CacheDirectiveIterator(ClientProtocol namenode,
+      CacheDirectiveInfo filter) {
+    super(Long.valueOf(0));
+    this.namenode = namenode;
+    this.filter = filter;
+  }
+
+  @Override
+  public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
+      throws IOException {
+    return namenode.listCacheDirectives(prevKey, filter);
+  }
+
+  @Override
+  public Long elementToPrevKey(CacheDirectiveEntry entry) {
+    return entry.getInfo().getId();
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
new file mode 100644
index 00000000000..44d6b451742
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CachePoolIterator.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.protocol;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.BatchedRemoteIterator;
+
+/**
+ * CachePoolIterator is a remote iterator that iterates cache pools.
+ * It supports retrying in case of namenode failover.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class CachePoolIterator
+    extends BatchedRemoteIterator<String, CachePoolEntry> {
+
+  private final ClientProtocol namenode;
+
+  public CachePoolIterator(ClientProtocol namenode) {
+    super("");
+    this.namenode = namenode;
+  }
+
+  @Override
+  public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
+      throws IOException {
+    return namenode.listCachePools(prevKey);
+  }
+
+  @Override
+  public String elementToPrevKey(CachePoolEntry entry) {
+    return entry.getInfo().getPoolName();
+  }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 8852f818f87..709047ac124 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -28,9 +28,9 @@ import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -1134,10 +1134,10 @@ public interface ClientProtocol {
    *          listCacheDirectives.
    * @param filter Parameters to use to filter the list results,
    *               or null to display all directives visible to us.
-   * @return A RemoteIterator which returns CacheDirectiveInfo objects.
+   * @return A batch of CacheDirectiveEntry objects.
    */
   @Idempotent
-  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
+  public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(
       long prevId, CacheDirectiveInfo filter) throws IOException;
 
   /**
@@ -1175,9 +1175,9 @@ public interface ClientProtocol {
    *
    * @param prevPool name of the last pool listed, or the empty string if this is
    *          the first invocation of listCachePools
-   * @return A RemoteIterator which returns CachePool objects.
+   * @return A batch of CachePoolEntry objects.
   */
  @Idempotent
-  public RemoteIterator<CachePoolEntry> listCachePools(String prevPool)
+  public BatchedEntries<CachePoolEntry> listCachePools(String prevPool)
     throws IOException;
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 6529ca51b8b..478b661677c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -24,12 +24,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FsServerDefaults;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.Options.Rename;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
-import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
@@ -52,8 +49,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowS
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolEntryProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CachePoolInfoProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto;
@@ -109,7 +104,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCa
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCachePoolsResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCorruptFileBlocksResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ListCacheDirectivesResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.MetaSaveRequestProto;
@@ -176,9 +170,7 @@ import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRespons
 import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto;
 import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;
 import org.apache.hadoop.security.token.Token;
-import org.apache.commons.lang.StringUtils;
-import com.google.common.primitives.Shorts;
 
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
@@ -1079,21 +1071,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
     try {
       CacheDirectiveInfo filter =
           PBHelper.convert(request.getFilter());
-      RemoteIterator<CacheDirectiveEntry> iter =
-        server.listCacheDirectives(request.getPrevId(), filter);
+      BatchedEntries<CacheDirectiveEntry> entries =
+        server.listCacheDirectives(request.getPrevId(), filter);
       ListCacheDirectivesResponseProto.Builder builder =
           ListCacheDirectivesResponseProto.newBuilder();
-      long prevId = 0;
-      while (iter.hasNext()) {
-        CacheDirectiveEntry entry = iter.next();
-        builder.addElements(PBHelper.convert(entry));
-        prevId = entry.getInfo().getId();
-      }
-      if (prevId == 0) {
-        builder.setHasMore(false);
-      } else {
-        iter = server.listCacheDirectives(prevId, filter);
-        builder.setHasMore(iter.hasNext());
+      builder.setHasMore(entries.hasMore());
+      for (int i=0, n=entries.size(); i<n; i++) {
+        builder.addElements(PBHelper.convert(entries.get(i)));
       }
       return builder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }
 
   @Override
   public ListCachePoolsResponseProto listCachePools(RpcController controller,
       ListCachePoolsRequestProto request) throws ServiceException {
     try {
-      RemoteIterator<CachePoolEntry> iter =
+      BatchedEntries<CachePoolEntry> entries =
         server.listCachePools(request.getPrevPoolName());
       ListCachePoolsResponseProto.Builder responseBuilder =
         ListCachePoolsResponseProto.newBuilder();
-      String prevPoolName = null;
-      while (iter.hasNext()) {
-        CachePoolEntry entry = iter.next();
-        responseBuilder.addEntries(PBHelper.convert(entry));
-        prevPoolName = entry.getInfo().getPoolName();
-      }
-      // fill in hasNext
-      if (prevPoolName == null) {
-        responseBuilder.setHasMore(false);
-      } else {
-        iter = server.listCachePools(prevPoolName);
-        responseBuilder.setHasMore(iter.hasNext());
+      responseBuilder.setHasMore(entries.hasMore());
+      for (int i=0, n=entries.size(); i<n; i++) {
+        responseBuilder.addEntries(PBHelper.convert(entries.get(i)));
       }
       return responseBuilder.build();
     } catch (IOException e) {
       throw new ServiceException(e);
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
-  private class CacheEntriesIterator
-    extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
-    private final CacheDirectiveInfo filter;
-
-    public CacheEntriesIterator(long prevKey,
-        CacheDirectiveInfo filter) {
-      super(prevKey);
-      this.filter = filter;
-    }
-
-    @Override
-    public BatchedEntries<CacheDirectiveEntry> makeRequest(
-        Long nextKey) throws IOException {
-      ListCacheDirectivesResponseProto response;
-      try {
-        response = rpcProxy.listCacheDirectives(null,
-            ListCacheDirectivesRequestProto.newBuilder().
-                setPrevId(nextKey).
-                setFilter(PBHelper.convert(filter)).
-                build());
-      } catch (ServiceException e) {
-        throw ProtobufHelper.getRemoteException(e);
-      }
-      return new BatchedCacheEntries(response);
-    }
-
-    @Override
-    public Long elementToPrevKey(CacheDirectiveEntry element) {
-      return element.getInfo().getId();
-    }
-  }
-
   @Override
-  public RemoteIterator<CacheDirectiveEntry>
+  public BatchedEntries<CacheDirectiveEntry>
     listCacheDirectives(long prevId,
       CacheDirectiveInfo filter) throws IOException {
     if (filter == null) {
       filter = new CacheDirectiveInfo.Builder().build();
     }
-    return new CacheEntriesIterator(prevId, filter);
+    try {
+      return new BatchedCacheEntries(
+        rpcProxy.listCacheDirectives(null,
+          ListCacheDirectivesRequestProto.newBuilder().
+            setPrevId(prevId).
+            setFilter(PBHelper.convert(filter)).
+            build()));
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
   }
 
   @Override
@@ -1164,35 +1139,16 @@ public class ClientNamenodeProtocolTranslatorPB implements
     }
   }
 
-  private class CachePoolIterator
-    extends BatchedRemoteIterator<String, CachePoolEntry> {
-
-    public CachePoolIterator(String prevKey) {
-      super(prevKey);
-    }
-
-    @Override
-    public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
-        throws IOException {
-      try {
-        return new BatchedCachePoolEntries(
-          rpcProxy.listCachePools(null,
-            ListCachePoolsRequestProto.newBuilder().
-              setPrevPoolName(prevKey).build()));
-      } catch (ServiceException e) {
-        throw ProtobufHelper.getRemoteException(e);
-      }
-    }
-
-    @Override
-    public String elementToPrevKey(CachePoolEntry entry) {
-      return entry.getInfo().getPoolName();
-    }
-  }
-
   @Override
-  public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
+  public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
-    return new CachePoolIterator(prevKey);
+    try {
+      return new BatchedCachePoolEntries(
+        rpcProxy.listCachePools(null,
+          ListCachePoolsRequestProto.newBuilder().
+            setPrevPoolName(prevKey).build()));
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index aa42ec676e9..cb235159a50 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -36,7 +36,6 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BatchedRemoteIterator;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -46,8 +45,8 @@ import org.apache.hadoop.fs.InvalidPathException;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.ha.HAServiceStatus;
@@ -1251,36 +1250,13 @@ class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.removeCacheDirective(id);
   }
 
-  private class ServerSideCacheEntriesIterator
-    extends BatchedRemoteIterator<Long, CacheDirectiveEntry> {
-
-    private final CacheDirectiveInfo filter;
-
-    public ServerSideCacheEntriesIterator (Long firstKey,
-        CacheDirectiveInfo filter) {
-      super(firstKey);
-      this.filter = filter;
-    }
-
-    @Override
-    public BatchedEntries<CacheDirectiveEntry> makeRequest(
-        Long nextKey) throws IOException {
-      return namesystem.listCacheDirectives(nextKey, filter);
-    }
-
-    @Override
-    public Long elementToPrevKey(CacheDirectiveEntry entry) {
-      return entry.getInfo().getId();
-    }
-  }
-
   @Override
-  public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(long prevId,
+  public BatchedEntries<CacheDirectiveEntry> listCacheDirectives(long prevId,
       CacheDirectiveInfo filter) throws IOException {
     if (filter == null) {
       filter = new CacheDirectiveInfo.Builder().build();
     }
-    return new ServerSideCacheEntriesIterator(prevId, filter);
+    return namesystem.listCacheDirectives(prevId, filter);
   }
 
   @Override
@@ -1298,28 +1274,9 @@ class NameNodeRpcServer implements NamenodeProtocols {
     namesystem.removeCachePool(cachePoolName);
   }
 
-  private class ServerSideCachePoolIterator
-    extends BatchedRemoteIterator<String, CachePoolEntry> {
-
-    public ServerSideCachePoolIterator(String prevKey) {
-      super(prevKey);
-    }
-
-    @Override
-    public BatchedEntries<CachePoolEntry> makeRequest(String prevKey)
-        throws IOException {
-      return namesystem.listCachePools(prevKey);
-    }
-
-    @Override
-    public String elementToPrevKey(CachePoolEntry entry) {
-      return entry.getInfo().getPoolName();
-    }
-  }
-
   @Override
-  public RemoteIterator<CachePoolEntry> listCachePools(String prevKey)
+  public BatchedEntries<CachePoolEntry> listCachePools(String prevKey)
       throws IOException {
-    return new ServerSideCachePoolIterator(prevKey);
+    return namesystem.listCachePools(prevKey != null ? prevKey : "");
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
index 6fd7881a94b..36492669114 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCacheDirectives.java
@@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;
+import org.apache.hadoop.hdfs.protocol.CacheDirectiveIterator;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveStats;
 import org.apache.hadoop.hdfs.protocol.CachePoolEntry;
 import org.apache.hadoop.hdfs.protocol.CachePoolInfo;
@@ -763,7 +764,7 @@ public class TestCacheDirectives {
     }
     // Uncache and check each path in sequence
     RemoteIterator<CacheDirectiveEntry> entries =
-      nnRpc.listCacheDirectives(0, null);
+      new CacheDirectiveIterator(nnRpc, null);
     for (int i=0; i<numFiles; i++) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
+  /**
+   * Add a list of cache pools, list cache pools,
+   * switch active NN, and list cache pools again.
+   */
+  @Test (timeout=60000)
+  public void testListCachePools() throws Exception {
+    final int poolCount = 7;
+    HashSet<String> poolNames = new HashSet<String>(poolCount);
+    for (int i=0; i<poolCount; i++) {
+      String poolName = "testListCachePools-" + i;
+      dfs.addCachePool(new CachePoolInfo(poolName));
+      poolNames.add(poolName);
+    }
+    listCachePools(poolNames, 0);
+
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+    cluster.waitActive(1);
+    listCachePools(poolNames, 1);
+  }
+
+  /**
+   * Add a list of cache directives, list cache directives,
+   * switch active NN, and list cache directives again.
+   */
+  @Test (timeout=60000)
+  public void testListCacheDirectives() throws Exception {
+    final int poolCount = 7;
+    HashSet<String> poolNames = new HashSet<String>(poolCount);
+    Path path = new Path("/p");
+    for (int i=0; i<poolCount; i++) {
+      String poolName = "testListCacheDirectives-" + i;
+      CacheDirectiveInfo directiveInfo =
+        new CacheDirectiveInfo.Builder().setPool(poolName).setPath(path).build();
+      dfs.addCachePool(new CachePoolInfo(poolName));
+      dfs.addCacheDirective(directiveInfo);
+      poolNames.add(poolName);
+    }
+    listCacheDirectives(poolNames, 0);
+
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+    cluster.waitActive(1);
+    listCacheDirectives(poolNames, 1);
+  }
+
+  @SuppressWarnings("unchecked")
+  private void listCachePools(
+      HashSet<String> poolNames, int active) throws Exception {
+    HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
+    RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
+    int poolCount = poolNames.size();
+    for (int i=0; i<poolCount; i++) {
+      CachePoolEntry pool = pools.next();
+      String pollName = pool.getInfo().getPoolName();
+      assertTrue("The pool name should be expected", tmpNames.remove(pollName));
+      if (i % 2 == 0) {
+        int standby = active;
+        active = (standby == 0) ? 1 : 0;
+        cluster.transitionToStandby(standby);
+        cluster.transitionToActive(active);
+        cluster.waitActive(active);
+      }
+    }
+    assertTrue("All pools must be found", tmpNames.isEmpty());
+  }
+
+  @SuppressWarnings("unchecked")
+  private void listCacheDirectives(
+      HashSet<String> poolNames, int active) throws Exception {
+    HashSet<String> tmpNames = (HashSet<String>)poolNames.clone();
+    RemoteIterator<CacheDirectiveEntry> directives = dfs.listCacheDirectives(null);
+    int poolCount = poolNames.size();
+    for (int i=0; i<poolCount; i++) {
+      CacheDirectiveEntry directive = directives.next();
+      String pollName = directive.getInfo().getPool();
+      assertTrue("The pool name should be expected", tmpNames.remove(pollName));
+      if (i % 2 == 0) {
+        int standby = active;
+        active = (standby == 0) ? 1 : 0;
+        cluster.transitionToStandby(standby);
+        cluster.transitionToActive(active);
+        cluster.waitActive(active);
+      }
+    }
+    assertTrue("All directives must be found", tmpNames.isEmpty());
+  }
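
For anyone who wants to try the change by hand, here is a minimal usage sketch of the client-side listing this patch fixes. It is not part of the patch; the class name and the configuration wiring are illustrative, and it assumes fs.defaultFS already points at an HA nameservice so the client proxy can fail over between NameNodes.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.CachePoolEntry;

    public class ListCachePoolsAcrossFailover {
      public static void main(String[] args) throws IOException {
        // Assumes an HA-enabled Configuration, e.g. fs.defaultFS = hdfs://mycluster.
        Configuration conf = new Configuration();
        DistributedFileSystem dfs =
            (DistributedFileSystem) new Path("/").getFileSystem(conf);
        // With this patch, each batch is fetched lazily through the new
        // CachePoolIterator, so a failover between batches is retried
        // against the new active NameNode instead of failing the listing.
        RemoteIterator<CachePoolEntry> pools = dfs.listCachePools();
        while (pools.hasNext()) {
          System.out.println(pools.next().getInfo().getPoolName());
        }
      }
    }

As I read the change, the key design point is that the batching iterators now live above ClientProtocol rather than inside the protobuf translator: every makeRequest() goes back through the retry/failover proxy, so each batch RPC, not just the first call, is routed to the current active NameNode.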