From 1222889f1a3b45df30c48a80d8c2bec7ad45ee43 Mon Sep 17 00:00:00 2001
From: Akira Ajisaka
Date: Thu, 21 Jul 2016 11:25:28 -0700
Subject: [PATCH] HDFS-10287. MiniDFSCluster should implement AutoCloseable.
 Contributed by Andras Bokor.

(cherry picked from commit fcde6940e0cbdedb1105007e4857137ecdfa1284)
---
 .../apache/hadoop/hdfs/MiniDFSCluster.java    |  7 ++-
 .../hadoop/hdfs/TestMiniDFSCluster.java       | 53 +++++++------------
 2 files changed, 24 insertions(+), 36 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index 8215b70b250..248c4f22444 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -136,7 +136,7 @@
  */
 @InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "Hive", "MapReduce", "Pig"})
 @InterfaceStability.Unstable
-public class MiniDFSCluster {
+public class MiniDFSCluster implements AutoCloseable {
 
   private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
   private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
@@ -2971,4 +2971,9 @@ private void addToFile(String p, String address) throws IOException {
       writer.close();
     }
   }
+
+  @Override
+  public void close() {
+    shutdown();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
index ec72d879379..4d027dcfd97 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -64,22 +64,17 @@ public void setUp() {
   public void testClusterWithoutSystemProperties() throws Throwable {
     String oldPrp = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
     System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new HdfsConfiguration();
-      File testDataCluster1 = new File(testDataPath, CLUSTER_1);
-      String c1Path = testDataCluster1.getAbsolutePath();
-      conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
-      cluster = new MiniDFSCluster.Builder(conf).build();
+    Configuration conf = new HdfsConfiguration();
+    File testDataCluster1 = new File(testDataPath, CLUSTER_1);
+    String c1Path = testDataCluster1.getAbsolutePath();
+    conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()){
       assertEquals(new File(c1Path + "/data"),
           new File(cluster.getDataDirectory()));
     } finally {
       if (oldPrp != null) {
         System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, oldPrp);
       }
-      if (cluster != null) {
-        cluster.shutdown();
-      }
     }
   }
 
@@ -110,15 +105,12 @@ public void testClusterSetDatanodeHostname() throws Throwable {
     File testDataCluster5 = new File(testDataPath, CLUSTER_5);
     String c5Path = testDataCluster5.getAbsolutePath();
     conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c5Path);
-    MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
-      .numDataNodes(1)
-      .checkDataNodeHostConfig(true)
-      .build();
-    try {
+    try (MiniDFSCluster cluster5 = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(1)
+        .checkDataNodeHostConfig(true)
+        .build()) {
       assertEquals("DataNode hostname config not respected", "MYHOST",
           cluster5.getDataNodes().get(0).getDatanodeId().getHostName());
-    } finally {
-      MiniDFSCluster.shutdownCluster(cluster5);
     }
   }
 
@@ -128,9 +120,8 @@ public void testClusterSetDatanodeDifferentStorageType() throws IOException {
     StorageType[][] storageType = new StorageType[][] {
         {StorageType.DISK, StorageType.ARCHIVE}, {StorageType.DISK},
         {StorageType.ARCHIVE}};
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(3).storageTypes(storageType).build();
-    try {
+    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3).storageTypes(storageType).build()) {
       cluster.waitActive();
       ArrayList<DataNode> dataNodes = cluster.getDataNodes();
       // Check the number of directory in DN's
@@ -138,17 +129,14 @@ public void testClusterSetDatanodeDifferentStorageType() throws IOException {
         assertEquals(DataNode.getStorageLocations(dataNodes.get(i).getConf())
             .size(), storageType[i].length);
       }
-    } finally {
-      MiniDFSCluster.shutdownCluster(cluster);
     }
   }
 
   @Test
   public void testClusterNoStorageTypeSetForDatanodes() throws IOException {
     final Configuration conf = new HdfsConfiguration();
-    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-        .numDataNodes(3).build();
-    try {
+    try (final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(3).build()) {
       cluster.waitActive();
       ArrayList<DataNode> dataNodes = cluster.getDataNodes();
       // Check the number of directory in DN's
@@ -156,20 +144,17 @@ public void testClusterNoStorageTypeSetForDatanodes() throws IOException {
         assertEquals(DataNode.getStorageLocations(datanode.getConf()).size(),
             2);
       }
-    } finally {
-      MiniDFSCluster.shutdownCluster(cluster);
     }
   }
 
   @Test
   public void testSetUpFederatedCluster() throws Exception {
     Configuration conf = new Configuration();
-    MiniDFSCluster cluster =
-        new MiniDFSCluster.Builder(conf).nnTopology(
-            MiniDFSNNTopology.simpleHAFederatedTopology(2))
-            .numDataNodes(2)
-            .build();
-    try {
+
+    try (MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf)
+            .nnTopology(MiniDFSNNTopology.simpleHAFederatedTopology(2))
+            .numDataNodes(2).build()) {
       cluster.waitActive();
       cluster.transitionToActive(1);
       cluster.transitionToActive(3);
@@ -201,8 +186,6 @@ public void testSetUpFederatedCluster() throws Exception {
               DFSUtil.addKeySuffixes(
                   DFS_NAMENODE_HTTP_ADDRESS_KEY, "ns1", "nn1")));
       }
-    } finally {
-      MiniDFSCluster.shutdownCluster(cluster);
     }
   }
 }
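
Usage note (not part of the patch): with close() delegating to shutdown(), callers can manage MiniDFSCluster with try-with-resources, as the updated tests above do. A minimal sketch of that pattern follows; the class name MiniDfsAutoCloseSketch and the /tmp/autoclose-demo path are illustrative assumptions, not from the patch.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class MiniDfsAutoCloseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // close() calls shutdown(), so the mini cluster is torn down even if
    // the body of the try block throws.
    try (MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build()) {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      fs.mkdirs(new Path("/tmp/autoclose-demo"));
    }
  }
}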