diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9132a7ef810..c1bc8a372f5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -481,6 +481,8 @@ Release 2.1.0-beta - UNRELEASED
     HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable and
     FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
 
+    HDFS-4677. Editlog should support synchronous writes. (ivanmi)
+
   BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS AND RELATED JIRAS
 
     HDFS-4076. Support snapshot of single files. (szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 361ea9919ab..1de39c52801 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -175,6 +175,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
   public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
+
+  public static final String DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH = "dfs.namenode.edits.noeditlogchannelflush";
+  public static final boolean DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT = false;
 
   public static final String DFS_LIST_LIMIT = "dfs.ls.limit";
   public static final int DFS_LIST_LIMIT_DEFAULT = 1000;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
index b2f65debc57..347ac53a1d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JNStorage.java
@@ -23,6 +23,7 @@
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -56,16 +57,18 @@ class JNStorage extends Storage {
       ImmutableList.of(Pattern.compile("(\\d+)"));
 
   /**
+   * @param conf Configuration object
    * @param logDir the path to the directory in which data will be stored
    * @param errorReporter a callback to report errors
    * @throws IOException
    */
-  protected JNStorage(File logDir, StorageErrorReporter errorReporter) throws IOException {
+  protected JNStorage(Configuration conf, File logDir,
+      StorageErrorReporter errorReporter) throws IOException {
     super(NodeType.JOURNAL_NODE);
 
     sd = new StorageDirectory(logDir);
     this.addStorageDir(sd);
-    this.fjm = new FileJournalManager(sd, errorReporter);
+    this.fjm = new FileJournalManager(conf, sd, errorReporter);
 
     analyzeStorage();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index 16b5694c927..75b5a708267 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -32,6 +32,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException;
@@ -133,9 +134,9 @@ class Journal implements Closeable {
    */
   private static final int WARN_SYNC_MILLIS_THRESHOLD = 1000;
 
-  Journal(File logDir, String journalId,
+  Journal(Configuration conf, File logDir, String journalId,
       StorageErrorReporter errorReporter) throws IOException {
-    storage = new JNStorage(logDir, errorReporter);
+    storage = new JNStorage(conf, logDir, errorReporter);
     this.journalId = journalId;
 
     refreshCachedData();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
index fe08f209f4a..8291b5932eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
@@ -76,7 +76,7 @@ synchronized Journal getOrCreateJournal(String jid) throws IOException {
     if (journal == null) {
       File logDir = getLogDir(jid);
       LOG.info("Initializing journal in directory " + logDir);
-      journal = new Journal(logDir, jid, new ErrorReporter());
+      journal = new Journal(conf, logDir, jid, new ErrorReporter());
       journalsById.put(jid, journal);
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
index 901c5c0a89e..92e375cc236 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogFileOutputStream.java
@@ -29,6 +29,8 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.io.IOUtils;
@@ -48,6 +50,7 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
   private FileChannel fc; // channel of the file stream for sync
   private EditsDoubleBuffer doubleBuf;
   static ByteBuffer fill = ByteBuffer.allocateDirect(MIN_PREALLOCATION_LENGTH);
+  private boolean shouldSyncWritesAndSkipFsync = false;
 
   private static boolean shouldSkipFsyncForTests = false;
 
@@ -61,17 +64,29 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
   /**
    * Creates output buffers and file object.
    *
+   * @param conf
+   *          Configuration object
    * @param name
    *          File name to store edit log
    * @param size
    *          Size of flush buffer
    * @throws IOException
    */
-  public EditLogFileOutputStream(File name, int size) throws IOException {
+  public EditLogFileOutputStream(Configuration conf, File name, int size)
+      throws IOException {
     super();
+    shouldSyncWritesAndSkipFsync = conf.getBoolean(
+        DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH,
+        DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT);
+
     file = name;
     doubleBuf = new EditsDoubleBuffer(size);
-    RandomAccessFile rp = new RandomAccessFile(name, "rw");
+    RandomAccessFile rp;
+    if (shouldSyncWritesAndSkipFsync) {
+      rp = new RandomAccessFile(name, "rws");
+    } else {
+      rp = new RandomAccessFile(name, "rw");
+    }
     fp = new FileOutputStream(rp.getFD()); // open for append
     fc = rp.getChannel();
     fc.position(fc.size());
@@ -182,9 +197,9 @@ public void flushAndSync(boolean durable) throws IOException {
       LOG.info("Nothing to flush");
       return;
     }
-    preallocate(); // preallocate file if necessay
+    preallocate(); // preallocate file if necessary
     doubleBuf.flushTo(fp);
-    if (durable && !shouldSkipFsyncForTests) {
+    if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) {
       fc.force(false); // metadata updates not needed
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index b05ac9a811e..611b2126f1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -246,7 +246,7 @@ private synchronized void initJournals(List<URI> dirs) {
       if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
         StorageDirectory sd = storage.getStorageDirectory(u);
         if (sd != null) {
-          journalSet.add(new FileJournalManager(sd, storage), required);
+          journalSet.add(new FileJournalManager(conf, sd, storage), required);
         }
       } else {
         journalSet.add(createJournal(u), required);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 435216c71b1..f745693ceb9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -30,6 +30,7 @@
 import java.util.regex.Pattern;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -57,6 +58,7 @@ public class FileJournalManager implements JournalManager {
   private static final Log LOG = LogFactory.getLog(FileJournalManager.class);
 
+  private final Configuration conf;
   private final StorageDirectory sd;
   private final StorageErrorReporter errorReporter;
   private int outputBufferCapacity = 512*1024;
@@ -72,8 +74,9 @@ public class FileJournalManager implements JournalManager {
   StoragePurger purger =
       new NNStorageRetentionManager.DeletionStoragePurger();
 
-  public FileJournalManager(StorageDirectory sd,
+  public FileJournalManager(Configuration conf, StorageDirectory sd,
       StorageErrorReporter errorReporter) {
+    this.conf = conf;
     this.sd = sd;
     this.errorReporter = errorReporter;
   }
@@ -102,8 +105,8 @@ synchronized public EditLogOutputStream startLogSegment(long txid)
       throws IOException {
     try {
       currentInProgress = NNStorage.getInProgressEditsFile(sd, txid);
-      EditLogOutputStream stm = new EditLogFileOutputStream(currentInProgress,
-          outputBufferCapacity);
+      EditLogOutputStream stm = new EditLogFileOutputStream(conf,
+          currentInProgress, outputBufferCapacity);
       stm.create();
       return stm;
     } catch (IOException e) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
index 8c4fcbbbe3a..6ada9dbbb8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/BinaryEditsVisitor.java
@@ -22,6 +22,7 @@
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
@@ -39,7 +40,8 @@ public class BinaryEditsVisitor implements OfflineEditsVisitor {
    * @param filename Name of file to write output to
    */
   public BinaryEditsVisitor(String outputName) throws IOException {
-    this.elfos = new EditLogFileOutputStream(new File(outputName), 0);
+    this.elfos = new EditLogFileOutputStream(new Configuration(),
+        new File(outputName), 0);
     elfos.create();
   }
 
@@ -64,4 +66,4 @@ public void close(Throwable error) throws IOException {
   public void visitOp(FSEditLogOp op) throws IOException {
     elfos.write(op);
   }
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 177d1e4a69e..344d55efa7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -1290,4 +1290,21 @@
 </property>
 
+<property>
+  <name>dfs.namenode.edits.noeditlogchannelflush</name>
+  <value>false</value>
+  <description>
+    Specifies whether to flush edit log file channel. When set, expensive
+    FileChannel#force calls are skipped and synchronous disk writes are
+    enabled instead by opening the edit log file with RandomAccessFile("rws")
+    flags. This can significantly improve the performance of edit log writes
+    on the Windows platform.
+    Note that the behavior of the "rws" flags is platform and hardware specific
+    and might not provide the same level of guarantees as FileChannel#force.
+    For example, the write will skip the disk-cache on SAS and SCSI devices
+    while it might not on SATA devices. This is an expert level setting,
+    change with caution.
+  </description>
+</property>
+
 </configuration>
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
index 3c4d575db87..e94dbea83c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournal.java
@@ -22,6 +22,7 @@
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
@@ -53,13 +54,16 @@ public class TestJournal {
   private StorageErrorReporter mockErrorReporter = Mockito.mock(
       StorageErrorReporter.class);
 
+  private Configuration conf;
   private Journal journal;
 
   @Before
   public void setup() throws Exception {
     FileUtil.fullyDelete(TEST_LOG_DIR);
-    journal = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
+    conf = new Configuration();
+    journal = new Journal(conf, TEST_LOG_DIR, JID,
+        mockErrorReporter);
     journal.format(FAKE_NSINFO);
   }
 
@@ -135,7 +139,7 @@ public void testRestartJournal() throws Exception {
     journal.close(); // close to unlock the storage dir
 
     // Now re-instantiate, make sure history is still there
-    journal = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
+    journal = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
 
     // The storage info should be read, even if no writer has taken over.
     assertEquals(storageString,
@@ -192,7 +196,7 @@ public void testJournalLocking() throws Exception {
     journal.newEpoch(FAKE_NSINFO, 1);
 
     try {
-      new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
+      new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
       fail("Did not fail to create another journal in same dir");
     } catch (IOException ioe) {
       GenericTestUtils.assertExceptionContains(
@@ -203,7 +207,7 @@ public void testJournalLocking() throws Exception {
 
     // Journal should no longer be locked after the close() call.
     // Hence, should be able to create a new Journal in the same dir.
-    Journal journal2 = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
+    Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
     journal2.newEpoch(FAKE_NSINFO, 2);
     journal2.close();
   }
@@ -231,7 +235,7 @@ public void testFinalizeWhenEditsAreMissed() throws Exception {
 
     // Check that, even if we re-construct the journal by scanning the
     // disk, we don't allow finalizing incorrectly.
     journal.close();
-    journal = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
+    journal = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
 
     try {
       journal.finalizeLogSegment(makeRI(4), 1, 6);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 0cabf0818dc..119bf597ff1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -762,7 +762,7 @@ private void doTestCrashRecoveryEmptyLog(boolean inBothDirs,
       File log = new File(currentDir,
           NNStorage.getInProgressEditsFileName(3));
 
-      EditLogFileOutputStream stream = new EditLogFileOutputStream(log, 1024);
+      EditLogFileOutputStream stream = new EditLogFileOutputStream(conf, log, 1024);
       try {
         stream.create();
         if (!inBothDirs) {
@@ -1233,7 +1233,7 @@ static void validateNoCrash(byte garbage[]) throws IOException {
     EditLogFileOutputStream elfos = null;
     EditLogFileInputStream elfis = null;
     try {
-      elfos = new EditLogFileOutputStream(TEST_LOG_NAME, 0);
+      elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 0);
       elfos.create();
       elfos.writeRaw(garbage, 0, garbage.length);
       elfos.setReadyToFlush();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
index 7f237a8213a..4f737ad076d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java
@@ -24,6 +24,7 @@
 import java.io.File;
 import java.io.IOException;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.StringUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -40,6 +41,8 @@ public class TestEditLogFileOutputStream {
   final static int MIN_PREALLOCATION_LENGTH =
       EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
 
+  private Configuration conf;
+
   static {
     // No need to fsync for the purposes of tests. This makes
     // the tests run much faster.
@@ -52,6 +55,11 @@ public void deleteEditsFile() {
     if (TEST_EDITS.exists()) TEST_EDITS.delete();
   }
 
+  @Before
+  public void setUp() {
+    conf = new Configuration();
+  }
+
   static void flushAndCheckLength(EditLogFileOutputStream elos,
       long expectedLength) throws IOException {
     elos.setReadyToFlush();
@@ -66,7 +74,8 @@ static void flushAndCheckLength(EditLogFileOutputStream elos,
 
   @Test
   public void testRawWrites() throws IOException {
-    EditLogFileOutputStream elos = new EditLogFileOutputStream(TEST_EDITS, 0);
+    EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, TEST_EDITS,
+        0);
     try {
       byte[] small = new byte[] {1,2,3,4,5,8,7};
       elos.create();
@@ -105,7 +114,7 @@ public void testRawWrites() throws IOException {
   public void testEditLogFileOutputStreamCloseAbort() throws IOException {
     // abort after a close should just ignore
     EditLogFileOutputStream editLogStream =
-        new EditLogFileOutputStream(TEST_EDITS, 0);
+        new EditLogFileOutputStream(conf, TEST_EDITS, 0);
     editLogStream.close();
     editLogStream.abort();
   }
@@ -118,7 +127,7 @@ public void testEditLogFileOutputStreamCloseAbort() throws IOException {
   public void testEditLogFileOutputStreamCloseClose() throws IOException {
     // close after a close should result in an IOE
     EditLogFileOutputStream editLogStream =
-        new EditLogFileOutputStream(TEST_EDITS, 0);
+        new EditLogFileOutputStream(conf, TEST_EDITS, 0);
     editLogStream.close();
     try {
       editLogStream.close();
@@ -136,7 +145,7 @@ public void testEditLogFileOutputStreamCloseClose() throws IOException {
   public void testEditLogFileOutputStreamAbortAbort() throws IOException {
     // abort after a close should just ignore
     EditLogFileOutputStream editLogStream =
-        new EditLogFileOutputStream(TEST_EDITS, 0);
+        new EditLogFileOutputStream(conf, TEST_EDITS, 0);
     editLogStream.abort();
     editLogStream.abort();
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
index 6863329cd77..7b4d7da2e73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java
@@ -36,6 +36,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
@@ -43,6 +44,7 @@
 import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Before;
 import org.junit.Test;
 
 import com.google.common.base.Joiner;
@@ -52,12 +54,19 @@ public class TestFileJournalManager {
   static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
 
+  private Configuration conf;
+
   static {
     // No need to fsync for the purposes of tests. This makes
     // the tests run much faster.
     EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
   }
 
+  @Before
+  public void setUp() {
+    conf = new Configuration();
+  }
+
   /**
    * Find out how many transactions we can read from a
    * FileJournalManager, starting at a given transaction ID.
@@ -116,7 +125,7 @@ public void testNormalOperation() throws IOException {
 
     long numJournals = 0;
     for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
-      FileJournalManager jm = new FileJournalManager(sd, storage);
+      FileJournalManager jm = new FileJournalManager(conf, sd, storage);
       assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
       numJournals++;
     }
@@ -136,7 +145,7 @@ public void testInprogressRecovery() throws IOException {
         5, new AbortSpec(5, 0));
 
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
     assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
         getNumberOfTransactions(jm, 1, true, false));
   }
@@ -159,16 +168,16 @@ public void testInprogressRecoveryMixed() throws IOException {
         5, new AbortSpec(5, 1));
     Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
     StorageDirectory sd = dirs.next();
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
     assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
 
     sd = dirs.next();
-    jm = new FileJournalManager(sd, storage);
+    jm = new FileJournalManager(conf, sd, storage);
     assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
         getNumberOfTransactions(jm, 1, true, false));
 
     sd = dirs.next();
-    jm = new FileJournalManager(sd, storage);
+    jm = new FileJournalManager(conf, sd, storage);
     assertEquals(6*TXNS_PER_ROLL,
         getNumberOfTransactions(jm, 1, true, false));
   }
@@ -192,17 +201,17 @@ public void testInprogressRecoveryAll() throws IOException {
         new AbortSpec(5, 2));
     Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
     StorageDirectory sd = dirs.next();
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
     assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
         getNumberOfTransactions(jm, 1, true, false));
 
     sd = dirs.next();
-    jm = new FileJournalManager(sd, storage);
+    jm = new FileJournalManager(conf, sd, storage);
     assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
         getNumberOfTransactions(jm, 1, true, false));
 
     sd = dirs.next();
-    jm = new FileJournalManager(sd, storage);
+    jm = new FileJournalManager(conf, sd, storage);
     assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
         getNumberOfTransactions(jm, 1, true, false));
   }
@@ -227,7 +236,7 @@ public void testFinalizeErrorReportedToNNStorage() throws IOException, Interrupt
         10, new AbortSpec(10, 0));
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
 
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
     String sdRootPath = sd.getRoot().getAbsolutePath();
     FileUtil.chmod(sdRootPath, "-w", true);
     try {
@@ -252,7 +261,7 @@ public void testReadFromStream() throws IOException {
         10, new AbortSpec(10, 0));
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
 
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
     long expectedTotalTxnCount = TXNS_PER_ROLL*10 + TXNS_PER_FAIL;
     assertEquals(expectedTotalTxnCount,
         getNumberOfTransactions(jm, 1, true, false));
@@ -278,7 +287,7 @@ public void testAskForTransactionsMidfile() throws IOException {
         10);
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
 
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
 
     // 10 rolls, so 11 rolled files, 110 txids total.
     final int TOTAL_TXIDS = 10 * 11;
@@ -316,7 +325,7 @@ public boolean accept(File dir, String name) {
     assertEquals(1, files.length);
     assertTrue(files[0].delete());
 
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
     assertEquals(startGapTxId-1, getNumberOfTransactions(jm, 1, true, true));
 
     assertEquals(0, getNumberOfTransactions(jm, startGapTxId, true, true));
@@ -349,7 +358,7 @@ public boolean accept(File dir, String name) {
 
     corruptAfterStartSegment(files[0]);
 
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
     assertEquals(10*TXNS_PER_ROLL+1,
         getNumberOfTransactions(jm, 1, true, false));
   }
@@ -364,7 +373,7 @@ public void testGetRemoteEditLog() throws IOException {
         NNStorage.getFinalizedEditsFileName(1001, 1100));
 
     // passing null for NNStorage because this unit test will not use it
-    FileJournalManager fjm = new FileJournalManager(sd, null);
+    FileJournalManager fjm = new FileJournalManager(conf, sd, null);
     assertEquals("[1,100],[101,200],[1001,1100]", getLogsAsString(fjm, 1));
     assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
     assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
@@ -428,7 +437,7 @@ public void testReadFromMiddleOfEditLog() throws CorruptionException,
         10);
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
 
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
 
     EditLogInputStream elis = getJournalInputStream(jm, 5, true);
     FSEditLogOp op = elis.readOp();
@@ -449,7 +458,7 @@ public void testExcludeInProgressStreams() throws CorruptionException,
         10, false);
 
     StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
-    FileJournalManager jm = new FileJournalManager(sd, storage);
+    FileJournalManager jm = new FileJournalManager(conf, sd, storage);
 
     // If we exclude the in-progess stream, we should only have 100 tx.
     assertEquals(100, getNumberOfTransactions(jm, 1, false, false));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
index 1eec8d2e58e..498d3931622 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java
@@ -266,12 +266,12 @@ private void runTest(TestCaseDescription tc) throws IOException {
         Joiner.on(",").join(purgedPaths));
   }
 
-  private static class TestCaseDescription {
+  private class TestCaseDescription {
     private Map<File, FakeRoot> dirRoots = Maps.newHashMap();
     private Set<File> expectedPurgedLogs = Sets.newLinkedHashSet();
     private Set<File> expectedPurgedImages = Sets.newLinkedHashSet();
 
-    private static class FakeRoot {
+    private class FakeRoot {
       NameNodeDirType type;
       List<File> files;
 
@@ -331,7 +331,7 @@ public FSEditLog mockEditLog(StoragePurger purger) throws IOException {
       if (!root.type.isOfType(NameNodeDirType.EDITS)) continue;
 
       // passing null NNStorage for unit test because it does not use it
-      FileJournalManager fjm = new FileJournalManager(
+      FileJournalManager fjm = new FileJournalManager(conf,
          root.mockStorageDir(), null);
       fjm.purger = purger;
       jms.add(fjm);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
index c40d07c66ad..c9af3cf203b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java
@@ -74,7 +74,7 @@ static void runEditLogTest(EditLogTestSetup elts) throws IOException {
     EditLogFileOutputStream elfos = null;
     EditLogFileInputStream elfis = null;
     try {
-      elfos = new EditLogFileOutputStream(TEST_LOG_NAME, 0);
+      elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 0);
       elfos.create();
       elts.addTransactionsToLog(elfos, cache);
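
The core of the change above is the trade in EditLogFileOutputStream between an explicit channel flush and synchronous "rws" writes. A minimal standalone sketch of that trade-off follows (illustrative only, not part of the patch; the class and method names are invented):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

// Illustrative only: a minimal model of the two write paths in
// EditLogFileOutputStream after this patch.
public class EditLogWriteSketch {

  // Default behavior: buffered writes followed by an explicit
  // FileChannel#force, mirroring flushAndSync() with the flag off.
  static void writeThenForce(File log, byte[] edits) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(log, "rw");
    try {
      FileChannel fc = raf.getChannel();
      fc.write(ByteBuffer.wrap(edits));
      fc.force(false); // flush data to disk; metadata updates not needed
    } finally {
      raf.close();
    }
  }

  // Behavior with dfs.namenode.edits.noeditlogchannelflush=true: the file is
  // opened in "rws" mode, so each write is synchronous and no force() call
  // is issued. Durability then depends on how the platform and device honor
  // synchronous writes, as the hdfs-default.xml description warns.
  static void writeSynchronously(File log, byte[] edits) throws IOException {
    RandomAccessFile raf = new RandomAccessFile(log, "rws");
    try {
      raf.getChannel().write(ByteBuffer.wrap(edits));
    } finally {
      raf.close();
    }
  }
}

Either path is meant to make an edit-log transaction durable before it is acknowledged; the patch keeps FileChannel#force as the default and gates the "rws" variant behind dfs.namenode.edits.noeditlogchannelflush because, per the description added to hdfs-default.xml, the strength of synchronous writes varies by platform and device.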