HDDS-1650. Fix Ozone tests leaking volume checker thread. Contributed by Xiaoyu Yao. (#915)

This commit is contained in:
Xiaoyu Yao 2019-06-06 11:20:04 -07:00 committed by Nanda kumar
parent 76c0183ae3
commit c7e6f076df
6 changed files with 37 additions and 12 deletions

View File

@@ -73,13 +73,15 @@ public class TestHddsDispatcher {
public void testContainerCloseActionWhenFull() throws IOException {
String testDir = GenericTestUtils.getTempPath(
TestHddsDispatcher.class.getSimpleName());
OzoneConfiguration conf = new OzoneConfiguration();
DatanodeDetails dd = randomDatanodeDetails();
VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
try {
UUID scmId = UUID.randomUUID();
OzoneConfiguration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, testDir);
DatanodeDetails dd = randomDatanodeDetails();
ContainerSet containerSet = new ContainerSet();
VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
DatanodeStateMachine stateMachine = Mockito.mock(
DatanodeStateMachine.class);
StateContext context = Mockito.mock(StateContext.class);
@@ -118,6 +120,7 @@ public class TestHddsDispatcher {
.addContainerActionIfAbsent(Mockito.any(ContainerAction.class));
} finally {
volumeSet.shutdown();
FileUtils.deleteDirectory(new File(testDir));
}

View File

@@ -24,6 +24,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
import org.apache.hadoop.util.ReflectionUtils;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@@ -40,10 +41,12 @@ public class TestRoundRobinVolumeChoosingPolicy {
private RoundRobinVolumeChoosingPolicy policy;
private List<HddsVolume> volumes;
private VolumeSet volumeSet;
private final String baseDir = MiniDFSCluster.getBaseDirectory();
private final String volume1 = baseDir + "disk1";
private final String volume2 = baseDir + "disk2";
private static final String DUMMY_IP_ADDR = "0.0.0.0";
@Before
@@ -53,10 +56,18 @@ public class TestRoundRobinVolumeChoosingPolicy {
conf.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataDirKey);
policy = ReflectionUtils.newInstance(
RoundRobinVolumeChoosingPolicy.class, null);
VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
volumes = volumeSet.getVolumesList();
}
@After
public void cleanUp() {
if (volumeSet != null) {
volumeSet.shutdown();
volumeSet = null;
}
}
@Test
public void testRRVolumeChoosingPolicy() throws Exception {
HddsVolume hddsVolume1 = volumes.get(0);

View File

@@ -237,6 +237,7 @@ public class TestVolumeSet {
//Set back to writable
try {
readOnlyVolumePath.setWritable(true);
volSet.shutdown();
} finally {
FileUtil.fullyDelete(volumePath);
}

View File

@@ -100,6 +100,7 @@ public class TestVolumeSetDiskChecks {
for (String d : dirs) {
assertTrue(new File(d).isDirectory());
}
volumeSet.shutdown();
}
/**
@@ -124,6 +125,7 @@ public class TestVolumeSetDiskChecks {
assertThat(volumeSet.getFailedVolumesList().size(), is(numBadVolumes));
assertThat(volumeSet.getVolumesList().size(),
is(numVolumes - numBadVolumes));
volumeSet.shutdown();
}
/**
@@ -146,6 +148,7 @@ public class TestVolumeSetDiskChecks {
assertEquals(volumeSet.getFailedVolumesList().size(), numVolumes);
assertEquals(volumeSet.getVolumesList().size(), 0);
volumeSet.shutdown();
}
/**

View File

@@ -229,14 +229,14 @@ public class TestKeyValueHandler {
@Test
public void testVolumeSetInKeyValueHandler() throws Exception{
File path = GenericTestUtils.getRandomizedTestDir();
Configuration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
try {
Configuration conf = new OzoneConfiguration();
conf.set(HDDS_DATANODE_DIR_KEY, path.getAbsolutePath());
ContainerSet cset = new ContainerSet();
int[] interval = new int[1];
interval[0] = 2;
ContainerMetrics metrics = new ContainerMetrics(interval);
VolumeSet volumeSet = new VolumeSet(UUID.randomUUID().toString(), conf);
DatanodeDetails datanodeDetails = Mockito.mock(DatanodeDetails.class);
DatanodeStateMachine stateMachine = Mockito.mock(
DatanodeStateMachine.class);
@@ -263,6 +263,7 @@ public class TestKeyValueHandler {
ex);
}
} finally {
volumeSet.shutdown();
FileUtil.fullyDelete(path);
}
}

View File

@@ -41,6 +41,7 @@ import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
import org.apache.hadoop.ozone.container.keyvalue.helpers.BlockUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Rule;
import org.junit.Test;
@@ -82,13 +83,20 @@ public class TestOzoneContainer {
conf.set(HddsConfigKeys.OZONE_METADATA_DIRS,
folder.newFolder().getAbsolutePath());
commitSpaceMap = new HashMap<String, Long>();
volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
}
@After
public void cleanUp() throws Exception {
if (volumeSet != null) {
volumeSet.shutdown();
volumeSet = null;
}
}
@Test
public void testBuildContainerMap() throws Exception {
volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
// Format the volumes
for (HddsVolume volume : volumeSet.getVolumesList()) {
volume.format(UUID.randomUUID().toString());
@@ -139,8 +147,6 @@ public class TestOzoneContainer {
@Test
public void testContainerCreateDiskFull() throws Exception {
volumeSet = new VolumeSet(datanodeDetails.getUuidString(), conf);
volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
long containerSize = (long) StorageUnit.MB.toBytes(100);
boolean diskSpaceException = false;