From c7e6f076df5b38702579db352475113e5f3ae5fb Mon Sep 17 00:00:00 2001
From: Xiaoyu Yao
Date: Thu, 6 Jun 2019 11:20:04 -0700
Subject: [PATCH] HDDS-1650. Fix Ozone tests leaking volume checker thread.
 Contributed by Xiaoyu Yao. (#915)

---
 .../common/impl/TestHddsDispatcher.java           |  9 ++++++---
 .../TestRoundRobinVolumeChoosingPolicy.java       | 13 ++++++++++++-
 .../container/common/volume/TestVolumeSet.java    |  1 +
 .../common/volume/TestVolumeSetDiskChecks.java    |  3 +++
 .../container/keyvalue/TestKeyValueHandler.java   |  7 ++++---
 .../container/ozoneimpl/TestOzoneContainer.java   | 16 +++++++++++-----
 6 files changed, 37 insertions(+), 12 deletions(-)

diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
index d4258201936..54dbe94c1c2 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java
@@ -73,13 +73,15 @@ public class TestHddsDispatcher {
   public void testContainerCloseActionWhenFull() throws IOException {
     String testDir = GenericTestUtils.getTempPath(
         TestHddsDispatcher.class.getSimpleName());
+    OzoneConfiguration conf = new OzoneConfiguration();
+    DatanodeDetails dd = randomDatanodeDetails();
+    VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
+
     try {
       UUID scmId = UUID.randomUUID();
-      OzoneConfiguration conf = new OzoneConfiguration();
       conf.set(HDDS_DATANODE_DIR_KEY, testDir);
-      DatanodeDetails dd = randomDatanodeDetails();
       ContainerSet containerSet = new ContainerSet();
-      VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
+
       DatanodeStateMachine stateMachine = Mockito.mock(
           DatanodeStateMachine.class);
       StateContext context = Mockito.mock(StateContext.class);
@@ -118,6 +120,7 @@ public class TestHddsDispatcher {
         .addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
 
     } finally {
+      volumeSet.shutdown();
       FileUtils.deleteDirectory(new File(testDir));
     }
 
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
index 80594d35245..d0fbf10269c 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestRoundRobinVolumeChoosingPolicy.java
@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
@@ -40,10 +41,12 @@ public class TestRoundRobinVolumeChoosingPolicy {
 
   private RoundRobinVolumeChoosingPolicy policy;
   private List volumes;
+  private VolumeSet volumeSet;
 
   private final String baseDir = MiniDFSCluster.getBaseDirectory();
   private final String volume1 = baseDir + "disk1";
   private final String volume2 = baseDir + "disk2";
+
   private static final String DUMMY_IP_ADDR = "0.0.0.0";
 
   @Before
@@ -53,10 +56,18 @@ public class TestRoundRobinVolumeChoosingPolicy {
     conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
     policy = ReflectionUtils.newInstance(
         RoundRobinVolumeChoosingPolicy.class, null);
-    VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
+    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
     volumes = volumeSet.getVolumesList();
   }
 
+  @After
+  public void cleanUp() {
+    if (volumeSet != null) {
+      volumeSet.shutdown();
+      volumeSet = null;
+    }
+  }
+
   @Test
   public void testRRVolumeChoosingPolicy() throws Exception {
     HddsVolume hddsVolume1 = volumes.get(0);
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
index f97ad4e873f..79eeb61495a 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSet.java
@@ -237,6 +237,7 @@ public class TestVolumeSet {
     //Set back to writable
     try {
       readOnlyVolumePath.setWritable(true);
+      volSet.shutdown();
     } finally {
       FileUtil.fullyDelete(volumePath);
     }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
index 472bb9891cb..c5deff0fc78 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/volume/TestVolumeSetDiskChecks.java
@@ -100,6 +100,7 @@ public class TestVolumeSetDiskChecks {
     for (String d : dirs) {
       assertTrue(new File(d).isDirectory());
     }
+    volumeSet.shutdown();
   }
 
   /**
@@ -124,6 +125,7 @@ public class TestVolumeSetDiskChecks {
     assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes));
     assertThat(volumeSet.getVolumesList().size(),
         is(numVolumes - numBadVolumes));
+    volumeSet.shutdown();
   }
 
   /**
@@ -146,6 +148,7 @@ public class TestVolumeSetDiskChecks {
 
     assertEquals(volumeSet.getFailedVolumesList().size(), numVolumes);
     assertEquals(volumeSet.getVolumesList().size(), 0);
+    volumeSet.shutdown();
   }
 
   /**
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
index 8ef9e19d537..2c71fef11a6 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
@@ -229,14 +229,14 @@ public class TestKeyValueHandler {
   @Test
   public void testVolumeSetInKeyValueHandler() throws Exception{
     File path = GenericTestUtils.getRandomizedTestDir();
+    Configuration conf = new OzoneConfiguration();
+    conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
+    VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
     try {
-      Configuration conf = new OzoneConfiguration();
-      conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
       ContainerSet cset = new ContainerSet();
       int[] interval = new int[1];
       interval[0] = 2;
       ContainerMetrics metrics = new ContainerMetrics(interval);
-      VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
       DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
       DatanodeStateMachine stateMachine = Mockito.mock(
           DatanodeStateMachine.class);
@@ -263,6 +263,7 @@ public class TestKeyValueHandler {
             ex);
       }
     } finally {
+      volumeSet.shutdown();
       FileUtil.fullyDelete(path);
     }
   }
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
index f5ebb49d063..b0d3a0f3b7b 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
+import org.junit.After;
 import org.junit.Before;
 import org.junit.Rule;
 import org.junit.Test;
@@ -82,13 +83,20 @@ public class TestOzoneContainer {
     conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
         folder.newFolder().getAbsolutePath());
     commitSpaceMap = new HashMap();
+    volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
+    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
+  }
+
+  @After
+  public void cleanUp() throws Exception {
+    if (volumeSet != null) {
+      volumeSet.shutdown();
+      volumeSet = null;
+    }
   }
 
   @Test
   public void testBuildContainerMap() throws Exception {
-    volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
-    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
-
     // Format the volumes
     for (HddsVolume volume : volumeSet.getVolumesList()) {
       volume.format(UUID.randomUUID().toString());
@@ -139,8 +147,6 @@ public class TestOzoneContainer {
 
   @Test
   public void testContainerCreateDiskFull() throws Exception {
-    volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
-    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
     long containerSize = (long) StorageUnit.MB.toBytes(100);
     boolean diskSpaceException = false;
 
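
Every file above applies the same two-part pattern: construct the VolumeSet where the cleanup code can always reach it (before the try block, or in the @Before method), and call VolumeSet#shutdown() in the finally block or an @After method so the background volume checker thread started by the VolumeSet does not leak into later tests. The following is a minimal JUnit 4 sketch of the @After variant, not part of the patch: it reuses only the VolumeSet constructor and shutdown() call already used above, the import path for OzoneConfiguration follows the usual hadoop-hdds layout, and the test class and test method names are purely illustrative.

import java.util.UUID;

import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

/** Illustrative skeleton of the shutdown pattern applied in HDDS-1650. */
public class ExampleVolumeSetCleanupTest {

  private VolumeSet volumeSet;

  @Before
  public void setUp() throws Exception {
    OzoneConfiguration conf = new OzoneConfiguration();
    // Created as a field in setUp() so cleanUp() can always reach it,
    // even if the test body throws before finishing.
    volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
  }

  @After
  public void cleanUp() {
    // Stops the periodic disk checker thread the VolumeSet started;
    // skipping this leaks the thread across test classes.
    if (volumeSet != null) {
      volumeSet.shutdown();
      volumeSet = null;
    }
  }

  @Test
  public void testVolumesAreUsable() throws Exception {
    // Exercise volumeSet.getVolumesList() here as the real tests do.
  }
}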