diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index eff1b86bf47..f51e265db9a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -312,6 +312,9 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4279. NameNode does not initialize generic conf keys when started with
     -recover. (Colin Patrick McCabe via atm)
 
+    HDFS-4291. edit log unit tests leave stray test_edit_log_file around
+    (Colin Patrick McCabe via todd)
+
   BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 357739c1689..27c4173a6bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -1209,22 +1209,19 @@ public class TestEditLog {
    *
    */
  static void validateNoCrash(byte garbage[]) throws IOException {
-    final String TEST_LOG_NAME = "test_edit_log";
+    final File TEST_LOG_NAME = new File(TEST_DIR, "test_edit_log");
 
     EditLogFileOutputStream elfos = null;
-    File file = null;
     EditLogFileInputStream elfis = null;
     try {
-      file = new File(TEST_LOG_NAME);
-      elfos = new EditLogFileOutputStream(file, 0);
+      elfos = new EditLogFileOutputStream(TEST_LOG_NAME, 0);
       elfos.create();
       elfos.writeRaw(garbage, 0, garbage.length);
       elfos.setReadyToFlush();
       elfos.flushAndSync(true);
       elfos.close();
       elfos = null;
-      file = new File(TEST_LOG_NAME);
-      elfis = new EditLogFileInputStream(file);
+      elfis = new EditLogFileInputStream(TEST_LOG_NAME);
 
       // verify that we can read everything without killing the JVM or
       // throwing an exception other than IOException
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index 5f18ee4236b..c73dbc197d6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -59,6 +59,8 @@ import com.google.common.collect.Sets;
 public class TestNameNodeRecovery {
   private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class);
   private static StartupOption recoverStartOpt = StartupOption.RECOVER;
+  private static final File TEST_DIR = new File(
+      System.getProperty("test.build.data","build/test/data"));
   static {
     recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL);
   }
@@ -66,15 +68,13 @@ public class TestNameNodeRecovery {
 
   static void runEditLogTest(EditLogTestSetup elts) throws IOException {
-    final String TEST_LOG_NAME = "test_edit_log";
+    final File TEST_LOG_NAME = new File(TEST_DIR, "test_edit_log");
     final OpInstanceCache cache = new OpInstanceCache();
 
     EditLogFileOutputStream elfos = null;
-    File file = null;
     EditLogFileInputStream elfis = null;
     try {
-      file = new File(TEST_LOG_NAME);
-      elfos = new EditLogFileOutputStream(file, 0);
+      elfos = new EditLogFileOutputStream(TEST_LOG_NAME, 0);
       elfos.create();
 
       elts.addTransactionsToLog(elfos, cache);
@@ -82,8 +82,7 @@ public class TestNameNodeRecovery {
       elfos.setReadyToFlush();
       elfos.flushAndSync(true);
       elfos.close();
       elfos = null;
-      file = new File(TEST_LOG_NAME);
-      elfis = new EditLogFileInputStream(file);
+      elfis = new EditLogFileInputStream(TEST_LOG_NAME);
       // reading through normally will get you an exception
       Set<Long> validTxIds = elts.getValidTxIds();
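
Illustrative note (not part of the patch above): the fix follows the usual Hadoop test pattern of resolving scratch files against the directory named by the test.build.data system property, falling back to build/test/data, instead of the process working directory, so test runs no longer leave a stray test_edit_log file behind in the source tree. A minimal, self-contained sketch of that lookup is below; the class name is hypothetical and only the TEST_DIR resolution mirrors the patch.

import java.io.File;

// Hypothetical example class; not part of the patch.
public class TestDataDirExample {
  // Resolve the build's test data directory, defaulting to build/test/data
  // when the test.build.data system property is unset.
  private static final File TEST_DIR = new File(
      System.getProperty("test.build.data", "build/test/data"));

  public static void main(String[] args) {
    // Scratch files are created under TEST_DIR, not the working directory.
    File editLog = new File(TEST_DIR, "test_edit_log");
    System.out.println("edit log fixture would live at: " + editLog.getAbsolutePath());
  }
}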