HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are causing Release Audit Warnings. (Contributed by Ruth Wisniewski)

Arpit Agarwal 2015-05-13 19:43:53 -07:00
parent 4356e8a5ef
commit 54fa9b4217
3 changed files with 22 additions and 5 deletions
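
Why the change: both tests write hosts include/exclude files under build/test/data and previously left them behind, and Apache RAT (the Release Audit tool) flags leftover files that lack license headers. The fix deletes each test's temp directory in a finally block. A minimal sketch of the idiom, assuming a hypothetical ExampleTest path (FileUtils.deleteQuietly is the actual commons-io call the patch uses):

import java.io.File;
import org.apache.commons.io.FileUtils;

public class TempDirCleanupSketch {
  public static void main(String[] args) {
    // Hypothetical temp dir mirroring the build/test/data layout the tests use.
    File dir = new File("build/test/data/temp/ExampleTest");
    try {
      if (!dir.mkdirs() && !dir.isDirectory()) {
        throw new IllegalStateException("could not create " + dir);
      }
      // ... a test body would write its include/exclude files under dir ...
    } finally {
      if (dir.exists()) {
        // Best-effort delete: deleteQuietly swallows IOExceptions, so
        // cleanup cannot mask an exception thrown by the try block.
        FileUtils.deleteQuietly(dir);
      }
    }
  }
}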

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -752,6 +752,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8380. Always call addStoredBlock on blocks which have been shifted
     from one storage to another (cmccabe)
 
+    HDFS-8243. Files written by TestHostsFiles and TestNameNodeMXBean are
+    causing Release Audit Warnings. (Ruth Wisniewski via Arpit Agarwal)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestHostsFiles.java

@@ -20,9 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertTrue;
 
 import java.lang.management.ManagementFactory;
+import java.io.File;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FileSystem;
@@ -126,7 +128,12 @@ public class TestHostsFiles {
 
       assertTrue("Live nodes should contain the decommissioned node",
           nodes.contains("Decommissioned"));
     } finally {
-      cluster.shutdown();
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+      if (localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
     }
   }
@@ -167,6 +174,9 @@ public class TestHostsFiles {
       if (cluster != null) {
         cluster.shutdown();
       }
+      if (localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
     }
   }
 }
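
Note on the second TestHostsFiles hunk: the bare cluster.shutdown() also gains a null guard. If MiniDFSCluster.Builder#build() throws, cluster is never assigned, and an unguarded finally would raise a NullPointerException that masks the original failure. The same shape as a sketch, with a stand-in class rather than the real MiniDFSCluster API:

// FakeCluster is a stand-in for MiniDFSCluster; illustrative only.
class FakeCluster {
  void shutdown() { /* release resources */ }
}

class GuardedShutdownSketch {
  void runTest() {
    FakeCluster cluster = null;
    try {
      cluster = new FakeCluster(); // if construction throws, cluster stays null
      // ... test body ...
    } finally {
      if (cluster != null) {       // guard against failed construction
        cluster.shutdown();
      }
    }
  }
}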

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.commons.io.FileUtils;
 import com.google.common.util.concurrent.Uninterruptibles;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
@@ -214,6 +215,8 @@ public class TestNameNodeMXBean {
 
     conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1);
     MiniDFSCluster cluster = null;
+    FileSystem localFileSys = null;
+    Path dir = null;
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
@@ -226,10 +229,9 @@ public class TestNameNodeMXBean {
           "Hadoop:service=NameNode,name=NameNodeInfo");
 
       // Define include file to generate deadNodes metrics
-      FileSystem localFileSys = FileSystem.getLocal(conf);
+      localFileSys = FileSystem.getLocal(conf);
       Path workingDir = localFileSys.getWorkingDirectory();
-      Path dir = new Path(workingDir,
-          "build/test/data/temp/TestNameNodeMXBean");
+      dir = new Path(workingDir,"build/test/data/temp/TestNameNodeMXBean");
       Path includeFile = new Path(dir, "include");
       assertTrue(localFileSys.mkdirs(dir));
       StringBuilder includeHosts = new StringBuilder();
@@ -258,8 +260,10 @@ public class TestNameNodeMXBean {
         assertTrue(deadNode.containsKey("decommissioned"));
         assertTrue(deadNode.containsKey("xferaddr"));
       }
-
     } finally {
+      if ((localFileSys != null) && localFileSys.exists(dir)) {
+        FileUtils.deleteQuietly(new File(dir.toUri().getPath()));
+      }
       if (cluster != null) {
         cluster.shutdown();
       }
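
The TestNameNodeMXBean hunks also show why localFileSys and dir are hoisted to null-initialized locals above the try: a variable declared inside a try block is out of scope in its finally block, so the new cleanup could not otherwise reference them. A short sketch of the scoping point, with illustrative names:

import java.io.File;
import org.apache.commons.io.FileUtils;

class HoistForCleanupSketch {
  void runTest() {
    File dir = null; // declared before the try so the finally block can see it
    try {
      dir = new File("build/test/data/temp/ExampleTest"); // hypothetical path
      dir.mkdirs();
      // ... write fixtures under dir ...
    } finally {
      // Without the hoist above, 'dir' would not compile here.
      if (dir != null && dir.exists()) {
        FileUtils.deleteQuietly(dir);
      }
    }
  }
}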