Revert from branch-2: HDFS-8332. DFS client API calls should check filesystem closed. Contributed by Rakesh R.
This commit is contained in:
parent 8969a5d45a
commit ea875939f1
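For context: HDFS-8332 had added an open-state guard at the top of the DFSClient API calls shown in the hunks below, and this revert strips those guards back out. A minimal sketch of the guard pattern follows; the names mirror DFSClient's conventions (clientRunning, the "Filesystem closed" message, which the tests below assert on), but the bodies are illustrative, not the verbatim Hadoop source.

import java.io.IOException;

// Sketch of the "check filesystem closed" guard that HDFS-8332 introduced
// and this commit reverts.
class DfsClientGuardSketch {
  // Flipped to false by close(); volatile so concurrent API callers
  // observe the closed state immediately.
  private volatile boolean clientRunning = true;

  // The guard each reverted call site invoked before any NameNode RPC.
  void checkOpen() throws IOException {
    if (!clientRunning) {
      throw new IOException("Filesystem closed");
    }
  }

  void close() {
    clientRunning = false;
    // ... release leases, sockets, and other client-side resources ...
  }
}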
@@ -381,8 +381,6 @@ Release 2.8.0 - UNRELEASED

     HDFS-6291. FSImage may be left unclosed in BootstrapStandby#doRun()
     (Sanghyun Yun via vinayakumarb)

-    HDFS-8332. DFS client API calls should check filesystem closed (Rakesh R via umamahesh)
-
     HDFS-7998. HDFS Federation : Command mentioned to add a NN to existing
     federated cluster is wrong (Ajith S via vinayakumarb)
@@ -643,7 +643,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#getPreferredBlockSize(String)
    */
   public long getBlockSize(String f) throws IOException {
-    checkOpen();
     TraceScope scope = getPathTraceScope("getBlockSize", f);
     try {
       return namenode.getPreferredBlockSize(f);
@@ -660,7 +659,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#getServerDefaults()
    */
   public FsServerDefaults getServerDefaults() throws IOException {
-    checkOpen();
     long now = Time.monotonicNow();
     if ((serverDefaults == null) ||
         (now - serverDefaultsLastUpdate > SERVER_DEFAULTS_VALIDITY_PERIOD)) {
@@ -852,7 +850,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#reportBadBlocks(LocatedBlock[])
    */
   public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
-    checkOpen();
     namenode.reportBadBlocks(blocks);
   }

@@ -926,7 +923,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public BlockLocation[] getBlockLocations(String src, long start,
       long length) throws IOException, UnresolvedLinkException {
-    checkOpen();
     TraceScope scope = getPathTraceScope("getBlockLocations", src);
     try {
       LocatedBlocks blocks = getLocatedBlocks(src, start, length);
@@ -961,7 +957,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   public BlockStorageLocation[] getBlockStorageLocations(
       List<BlockLocation> blockLocations) throws IOException,
       UnsupportedOperationException, InvalidBlockTokenException {
-    checkOpen();
     if (!getConf().isHdfsBlocksMetadataEnabled()) {
       throw new UnsupportedOperationException("Datanode-side support for " +
           "getVolumeBlockLocations() must also be enabled in the client " +
@@ -1428,7 +1423,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public void createSymlink(String target, String link, boolean createParent)
       throws IOException {
-    checkOpen();
     TraceScope scope = getPathTraceScope("createSymlink", target);
     try {
       final FsPermission dirPerm = applyUMask(null);
@@ -1551,7 +1545,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public boolean setReplication(String src, short replication)
       throws IOException {
-    checkOpen();
     TraceScope scope = getPathTraceScope("setReplication", src);
     try {
       return namenode.setReplication(src, replication);
@@ -1575,7 +1568,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   public void setStoragePolicy(String src, String policyName)
       throws IOException {
-    checkOpen();
     TraceScope scope = getPathTraceScope("setStoragePolicy", src);
     try {
       namenode.setStoragePolicy(src, policyName);
@@ -1595,7 +1587,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @return All the existing storage policies
    */
   public BlockStoragePolicy[] getStoragePolicies() throws IOException {
-    checkOpen();
     TraceScope scope = Trace.startSpan("getStoragePolicies", traceSampler);
     try {
       return namenode.getStoragePolicies();
@@ -2251,7 +2242,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#setSafeMode(HdfsConstants.SafeModeAction,boolean)
    */
   public boolean setSafeMode(SafeModeAction action) throws IOException {
-    checkOpen();
     return setSafeMode(action, false);
   }

@@ -2454,7 +2444,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,

   public RemoteIterator<CacheDirectiveEntry> listCacheDirectives(
       CacheDirectiveInfo filter) throws IOException {
-    checkOpen();
     return new CacheDirectiveIterator(namenode, filter, traceSampler);
   }

@@ -2495,7 +2484,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }

   public RemoteIterator<CachePoolEntry> listCachePools() throws IOException {
-    checkOpen();
     return new CachePoolIterator(namenode, traceSampler);
   }

@@ -2505,7 +2493,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#saveNamespace()
    */
   void saveNamespace() throws AccessControlException, IOException {
-    checkOpen();
     TraceScope scope = Trace.startSpan("saveNamespace", traceSampler);
     try {
       namenode.saveNamespace();
@@ -2523,7 +2510,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#rollEdits()
    */
   long rollEdits() throws AccessControlException, IOException {
-    checkOpen();
     TraceScope scope = Trace.startSpan("rollEdits", traceSampler);
     try {
       return namenode.rollEdits();
@@ -2546,7 +2532,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   boolean restoreFailedStorage(String arg)
       throws AccessControlException, IOException{
-    checkOpen();
     TraceScope scope = Trace.startSpan("restoreFailedStorage", traceSampler);
     try {
       return namenode.restoreFailedStorage(arg);
@@ -2563,7 +2548,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#refreshNodes()
    */
   public void refreshNodes() throws IOException {
-    checkOpen();
     TraceScope scope = Trace.startSpan("refreshNodes", traceSampler);
     try {
       namenode.refreshNodes();
@@ -2578,7 +2562,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#metaSave(String)
    */
   public void metaSave(String pathname) throws IOException {
-    checkOpen();
     TraceScope scope = Trace.startSpan("metaSave", traceSampler);
     try {
       namenode.metaSave(pathname);
@@ -2596,7 +2579,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#setBalancerBandwidth(long)
    */
   public void setBalancerBandwidth(long bandwidth) throws IOException {
-    checkOpen();
     TraceScope scope = Trace.startSpan("setBalancerBandwidth", traceSampler);
     try {
       namenode.setBalancerBandwidth(bandwidth);
@@ -2609,7 +2591,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#finalizeUpgrade()
    */
   public void finalizeUpgrade() throws IOException {
-    checkOpen();
     TraceScope scope = Trace.startSpan("finalizeUpgrade", traceSampler);
     try {
       namenode.finalizeUpgrade();
@@ -2619,7 +2600,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }

   RollingUpgradeInfo rollingUpgrade(RollingUpgradeAction action) throws IOException {
-    checkOpen();
     TraceScope scope = Trace.startSpan("rollingUpgrade", traceSampler);
     try {
       return namenode.rollingUpgrade(action);
@@ -2705,7 +2685,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    * @see ClientProtocol#getContentSummary(String)
    */
   ContentSummary getContentSummary(String src) throws IOException {
-    checkOpen();
     TraceScope scope = getPathTraceScope("getContentSummary", src);
     try {
       return namenode.getContentSummary(src);
@@ -2724,7 +2703,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   void setQuota(String src, long namespaceQuota, long storagespaceQuota)
       throws IOException {
-    checkOpen();
     // sanity check
     if ((namespaceQuota <= 0 && namespaceQuota != HdfsConstants.QUOTA_DONT_SET &&
          namespaceQuota != HdfsConstants.QUOTA_RESET) ||
@@ -2758,7 +2736,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
    */
   void setQuotaByStorageType(String src, StorageType type, long quota)
       throws IOException {
-    checkOpen();
     if (quota <= 0 && quota != HdfsConstants.QUOTA_DONT_SET &&
         quota != HdfsConstants.QUOTA_RESET) {
       throw new IllegalArgumentException("Invalid values for quota :" +
@@ -3104,13 +3081,11 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
   }

   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException {
-    checkOpen();
     return new DFSInotifyEventInputStream(traceSampler, namenode);
   }

   public DFSInotifyEventInputStream getInotifyEventStream(long lastReadTxid)
       throws IOException {
-    checkOpen();
     return new DFSInotifyEventInputStream(traceSampler, namenode, lastReadTxid);
   }
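Each DFSClient hunk above is cut off at the try block, so the finally that closes the trace scope never appears in the diff context. The following self-contained sketch shows the surrounding method shape; TraceScopeSketch stands in for org.apache.htrace.TraceScope, the RPC body is a dummy, and the exact form of the real finally block is an assumption since it lies outside the hunks shown.

import java.io.Closeable;
import java.io.IOException;

// Stand-in for org.apache.htrace.TraceScope: a named span that must be
// closed when the wrapped operation finishes.
final class TraceScopeSketch implements Closeable {
  private final String op;
  TraceScopeSketch(String op) { this.op = op; }
  @Override
  public void close() { /* end the span named `op` */ }
}

class TracedRpcSketch {
  // Mirrors the shape of the DFSClient methods above: open a scope,
  // issue the RPC, close the scope in finally.
  long getBlockSize(String path) throws IOException {
    TraceScopeSketch scope = new TraceScopeSketch("getBlockSize");
    try {
      return doRpc(path); // stands in for namenode.getPreferredBlockSize(f)
    } finally {
      scope.close();      // in DFSClient this finally falls outside the hunk
    }
  }

  private long doRpc(String path) throws IOException {
    return 128L * 1024 * 1024; // dummy value: 128 MB
  }
}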
@@ -62,16 +62,12 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Options.ChecksumOpt;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
-import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.fs.VolumeId;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.client.impl.LeaseRenewer;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.hdfs.server.namenode.ha.HATestUtil;
 import org.apache.hadoop.hdfs.web.HftpFileSystem;

@@ -166,176 +162,25 @@ public class TestDistributedFileSystem {
     MiniDFSCluster cluster = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
-      DistributedFileSystem fileSys = cluster.getFileSystem();
+      FileSystem fileSys = cluster.getFileSystem();

       // create two files, leaving them open
       fileSys.create(new Path("/test/dfsclose/file-0"));
       fileSys.create(new Path("/test/dfsclose/file-1"));

       // create another file, close it, and read it, so
       // the client gets a socket in its SocketCache
       Path p = new Path("/non-empty-file");
       DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
       DFSTestUtil.readFile(fileSys, p);

       fileSys.close();
-
-      DFSClient dfsClient = fileSys.getClient();
-      verifyOpsUsingClosedClient(dfsClient);
     } finally {
       if (cluster != null) {cluster.shutdown();}
     }
   }
-
-  private void verifyOpsUsingClosedClient(DFSClient dfsClient) {
-    Path p = new Path("/non-empty-file");
-    try {
-      dfsClient.getBlockSize(p.getName());
-      fail("getBlockSize using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.getServerDefaults();
-      fail("getServerDefaults using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.reportBadBlocks(new LocatedBlock[0]);
-      fail("reportBadBlocks using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.getBlockLocations(p.getName(), 0, 1);
-      fail("getBlockLocations using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.getBlockStorageLocations(new ArrayList<BlockLocation>());
-      fail("getBlockStorageLocations using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.createSymlink("target", "link", true);
-      fail("createSymlink using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.getLinkTarget(p.getName());
-      fail("getLinkTarget using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.setReplication(p.getName(), (short) 3);
-      fail("setReplication using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.setStoragePolicy(p.getName(),
-          HdfsConstants.ONESSD_STORAGE_POLICY_NAME);
-      fail("setStoragePolicy using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.getStoragePolicies();
-      fail("getStoragePolicies using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
-      fail("setSafeMode using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.refreshNodes();
-      fail("refreshNodes using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.metaSave(p.getName());
-      fail("metaSave using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.setBalancerBandwidth(1000L);
-      fail("setBalancerBandwidth using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.finalizeUpgrade();
-      fail("finalizeUpgrade using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.rollingUpgrade(RollingUpgradeAction.QUERY);
-      fail("rollingUpgrade using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.getInotifyEventStream();
-      fail("getInotifyEventStream using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.getInotifyEventStream(100L);
-      fail("getInotifyEventStream using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.saveNamespace();
-      fail("saveNamespace using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.rollEdits();
-      fail("rollEdits using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.restoreFailedStorage("");
-      fail("restoreFailedStorage using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.getContentSummary(p.getName());
-      fail("getContentSummary using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.setQuota(p.getName(), 1000L, 500L);
-      fail("setQuota using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfsClient.setQuotaByStorageType(p.getName(), StorageType.DISK, 500L);
-      fail("setQuotaByStorageType using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-  }

   @Test
   public void testDFSCloseOrdering() throws Exception {
     DistributedFileSystem fs = new MyDistributedFileSystem();
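The removed verifyOpsUsingClosedClient method above repeats one try/fail/catch idiom two dozen times. A compact functional equivalent is sketched below; expectClosed and ClosedOp are hypothetical helpers, not part of this patch or of the Hadoop test code.

import java.io.IOException;

import org.apache.hadoop.test.GenericTestUtils;

import static org.junit.Assert.fail;

class ClosedClientAssertionSketch {
  // Hypothetical functional interface for a client call expected to fail.
  interface ClosedOp {
    void run() throws IOException;
  }

  // Asserts that an operation on a closed client fails with the
  // "Filesystem closed" IOException raised by checkOpen().
  static void expectClosed(String opName, ClosedOp op) {
    try {
      op.run();
      fail(opName + " using a closed filesystem!");
    } catch (IOException ioe) {
      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
    }
  }
}

// Usage against the client from the removed test, e.g.:
//   expectClosed("getServerDefaults", dfsClient::getServerDefaults);
//   expectClosed("refreshNodes", dfsClient::refreshNodes);
//   expectClosed("rollEdits", dfsClient::rollEdits);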
@@ -228,7 +228,6 @@ public class TestRollingUpgradeRollback {
       dfs.mkdirs(bar);
       dfs.close();

-      dfs = dfsCluster.getFileSystem(0);
       TestRollingUpgrade.queryForPreparation(dfs);

       // If the query returns true, both active and the standby NN should have
@@ -59,6 +59,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.LogVerificationAppender;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
 import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

@@ -295,35 +296,6 @@ public class TestCacheDirectives {

     info = new CachePoolInfo("pool2");
     dfs.addCachePool(info);
-
-    // Perform cache pool operations using a closed file system.
-    DistributedFileSystem dfs1 = (DistributedFileSystem) cluster
-        .getNewFileSystemInstance(0);
-    dfs1.close();
-    try {
-      dfs1.listCachePools();
-      fail("listCachePools using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfs1.addCachePool(info);
-      fail("addCachePool using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfs1.modifyCachePool(info);
-      fail("modifyCachePool using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfs1.removeCachePool(poolName);
-      fail("removeCachePool using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
   }

   @Test(timeout=60000)

@@ -566,35 +538,6 @@ public class TestCacheDirectives {
     dfs.modifyCacheDirective(new CacheDirectiveInfo.Builder(
         directive).setId(id).setReplication((short)2).build());
     dfs.removeCacheDirective(id);
-
-    // Perform cache directive operations using a closed file system.
-    DistributedFileSystem dfs1 = (DistributedFileSystem) cluster
-        .getNewFileSystemInstance(0);
-    dfs1.close();
-    try {
-      dfs1.listCacheDirectives(null);
-      fail("listCacheDirectives using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfs1.addCacheDirective(alpha);
-      fail("addCacheDirective using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfs1.modifyCacheDirective(alpha);
-      fail("modifyCacheDirective using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
-    try {
-      dfs1.removeCacheDirective(alphaId);
-      fail("removeCacheDirective using a closed filesystem!");
-    } catch (IOException ioe) {
-      GenericTestUtils.assertExceptionContains("Filesystem closed", ioe);
-    }
   }

   @Test(timeout=60000)