HDFS-5356. MiniDFSCluster should close all open FileSystems when shutdown() (Contributed by Rakesh R)

Vinayakumar B 2015-03-17 15:32:34 +05:30
parent e5370477c2
commit 018893e81e
4 changed files with 35 additions and 8 deletions
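To illustrate the change, here is a minimal sketch (not part of this commit; the class name, the /sketch path, and the single-datanode setup are made up) of a test-style program that obtains a FileSystem from the mini cluster and relies on shutdown() to close it rather than closing it explicitly:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniClusterShutdownSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .numDataNodes(1).build();
    try {
      cluster.waitActive();
      // With this change the cluster remembers the handle it returns here.
      FileSystem fs = cluster.getFileSystem(0);
      fs.mkdirs(new Path("/sketch"));
      // Intentionally no fs.close() here.
    } finally {
      // shutdown() now also closes every FileSystem the cluster handed out.
      cluster.shutdown();
    }
  }
}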

CHANGES.txt

@@ -327,6 +327,9 @@ Release 2.8.0 - UNRELEASED
BUG FIXES
HDFS-5356. MiniDFSCluster should close all open FileSystems when shutdown()
(Rakesh R via vinayakumarb)
Release 2.7.0 - UNRELEASED
INCOMPATIBLE CHANGES

MiniDFSCluster.java

@@ -60,6 +60,7 @@ import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -118,6 +119,7 @@ import org.apache.hadoop.util.ToolRunner;
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.Sets;
/**
* This class creates a single-process DFS cluster for junit testing.
@@ -523,7 +525,8 @@ public class MiniDFSCluster {
private boolean federation;
private boolean checkExitOnShutdown = true;
protected final int storagesPerDatanode;
private Set<FileSystem> fileSystems = Sets.newHashSet();
/**
* A unique instance identifier for the cluster. This
* is used to disambiguate HA filesystems in the case where
@@ -1705,6 +1708,13 @@ public class MiniDFSCluster {
* Shutdown all the nodes in the cluster.
*/
public void shutdown(boolean deleteDfsDir) {
shutdown(deleteDfsDir, true);
}
/**
* Shutdown all the nodes in the cluster.
*/
public void shutdown(boolean deleteDfsDir, boolean closeFileSystem) {
LOG.info("Shutting down the Mini HDFS Cluster");
if (checkExitOnShutdown) {
if (ExitUtil.terminateCalled()) {
@@ -1714,6 +1724,16 @@ public class MiniDFSCluster {
throw new AssertionError("Test resulted in an unexpected exit");
}
}
if (closeFileSystem) {
for (FileSystem fs : fileSystems) {
try {
fs.close();
} catch (IOException ioe) {
LOG.warn("Exception while closing file system", ioe);
}
}
fileSystems.clear();
}
shutdownDataNodes();
for (NameNodeInfo nnInfo : nameNodes) {
if (nnInfo == null) continue;
@@ -2144,8 +2164,10 @@
* Get a client handle to the DFS cluster for the namenode at given index.
*/
public DistributedFileSystem getFileSystem(int nnIndex) throws IOException {
return (DistributedFileSystem)FileSystem.get(getURI(nnIndex),
nameNodes[nnIndex].conf);
DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(
getURI(nnIndex), nameNodes[nnIndex].conf);
fileSystems.add(dfs);
return dfs;
}
/**
@@ -2153,7 +2175,9 @@
* This simulates different threads working on different FileSystem instances.
*/
public FileSystem getNewFileSystemInstance(int nnIndex) throws IOException {
return FileSystem.newInstance(getURI(nnIndex), nameNodes[nnIndex].conf);
FileSystem dfs = FileSystem.newInstance(getURI(nnIndex), nameNodes[nnIndex].conf);
fileSystems.add(dfs);
return dfs;
}
/**

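The two test files below switch from shutdown() to the new shutdown(false, false) overload: they restart the cluster on the same storage directories (TestFileCreation additionally reuses the same NameNode port) and keep working with client handles and leases created before the restart, so shutdown must neither delete the DFS directories nor close the tracked FileSystem instances. A hypothetical helper showing that restart pattern, assuming test fields named conf and cluster (the helper name and numDataNodes(1) are made up), might look like this:

private void restartClusterKeepingClients() throws java.io.IOException {
  // deleteDfsDir=false keeps the name/data dirs; closeFileSystem=false leaves
  // previously returned FileSystem handles (and their open files/leases) alone.
  cluster.shutdown(false, false);
  cluster = new MiniDFSCluster.Builder(conf)
      .format(false)      // reuse the existing storage instead of reformatting
      .numDataNodes(1)    // illustrative; the real tests use their own replication
      .build();
  cluster.waitActive();
}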
TestFileCreation.java

@@ -675,7 +675,7 @@ public class TestFileCreation {
// restart cluster with the same namenode port as before.
// This ensures that leases are persisted in fsimage.
cluster.shutdown();
cluster.shutdown(false, false);
try {
Thread.sleep(2*MAX_IDLE_TIME);
} catch (InterruptedException e) {
@@ -687,7 +687,7 @@ public class TestFileCreation {
// restart cluster yet again. This triggers the code to read in
// persistent leases from fsimage.
cluster.shutdown();
cluster.shutdown(false, false);
try {
Thread.sleep(5000);
} catch (InterruptedException e) {

TestRenameWithSnapshots.java

@@ -519,8 +519,8 @@ public class TestRenameWithSnapshots {
File fsnAfter = new File(testDir, "dumptree_after");
SnapshotTestHelper.dumpTree2File(fsdir, fsnBefore);
cluster.shutdown();
cluster.shutdown(false, false);
cluster = new MiniDFSCluster.Builder(conf).format(false)
.numDataNodes(REPL).build();
cluster.waitActive();