HDFS-4677. Merging change r1491096 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1491101 13f79535-47bb-0310-9956-ffa450edef68
Ivan Mitic 2013-06-08 23:49:26 +00:00
parent f55574bfdf
commit b134d65389
16 changed files with 114 additions and 46 deletions

CHANGES.txt

@@ -481,6 +481,8 @@ Release 2.1.0-beta - UNRELEASED
HDFS-4610. Use common utils FileUtil#setReadable/Writable/Executable and
FileUtil#canRead/Write/Execute. (Ivan Mitic via suresh)
HDFS-4677. Editlog should support synchronous writes. (ivanmi)
BREAKDOWN OF HDFS-2802 HDFS SNAPSHOT SUBTASKS AND RELATED JIRAS
HDFS-4076. Support snapshot of single files. (szetszwo)

DFSConfigKeys.java

@@ -175,6 +175,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
public static final String DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH = "dfs.namenode.edits.noeditlogchannelflush";
public static final boolean DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT = false;
public static final String DFS_LIST_LIMIT = "dfs.ls.limit";
public static final int DFS_LIST_LIMIT_DEFAULT = 1000;

JNStorage.java

@ -23,6 +23,7 @@ import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
@@ -56,16 +57,18 @@ class JNStorage extends Storage {
ImmutableList.of(Pattern.compile("(\\d+)"));
/**
* @param conf Configuration object
* @param logDir the path to the directory in which data will be stored
* @param errorReporter a callback to report errors
* @throws IOException
*/
protected JNStorage(File logDir, StorageErrorReporter errorReporter) throws IOException {
protected JNStorage(Configuration conf, File logDir,
StorageErrorReporter errorReporter) throws IOException {
super(NodeType.JOURNAL_NODE);
sd = new StorageDirectory(logDir);
this.addStorageDir(sd);
this.fjm = new FileJournalManager(sd, errorReporter);
this.fjm = new FileJournalManager(conf, sd, errorReporter);
analyzeStorage();
}

Journal.java

@@ -32,6 +32,7 @@ import java.util.regex.Pattern;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException;
@@ -133,9 +134,9 @@ class Journal implements Closeable {
*/
private static final int WARN_SYNC_MILLIS_THRESHOLD = 1000;
Journal(File logDir, String journalId,
Journal(Configuration conf, File logDir, String journalId,
StorageErrorReporter errorReporter) throws IOException {
storage = new JNStorage(logDir, errorReporter);
storage = new JNStorage(conf, logDir, errorReporter);
this.journalId = journalId;
refreshCachedData();

JournalNode.java

@@ -76,7 +76,7 @@ public class JournalNode implements Tool, Configurable {
if (journal == null) {
File logDir = getLogDir(jid);
LOG.info("Initializing journal in directory " + logDir);
journal = new Journal(logDir, jid, new ErrorReporter());
journal = new Journal(conf, logDir, jid, new ErrorReporter());
journalsById.put(jid, journal);
}

EditLogFileOutputStream.java

@@ -29,6 +29,8 @@ import java.nio.channels.FileChannel;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.io.IOUtils;
@@ -48,6 +50,7 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
private FileChannel fc; // channel of the file stream for sync
private EditsDoubleBuffer doubleBuf;
static ByteBuffer fill = ByteBuffer.allocateDirect(MIN_PREALLOCATION_LENGTH);
private boolean shouldSyncWritesAndSkipFsync = false;
private static boolean shouldSkipFsyncForTests = false;
@@ -61,17 +64,29 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
/**
* Creates output buffers and file object.
*
* @param conf
* Configuration object
* @param name
* File name to store edit log
* @param size
* Size of flush buffer
* @throws IOException
*/
public EditLogFileOutputStream(File name, int size) throws IOException {
public EditLogFileOutputStream(Configuration conf, File name, int size)
throws IOException {
super();
shouldSyncWritesAndSkipFsync = conf.getBoolean(
DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH,
DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH_DEFAULT);
file = name;
doubleBuf = new EditsDoubleBuffer(size);
RandomAccessFile rp = new RandomAccessFile(name, "rw");
RandomAccessFile rp;
if (shouldSyncWritesAndSkipFsync) {
rp = new RandomAccessFile(name, "rws");
} else {
rp = new RandomAccessFile(name, "rw");
}
fp = new FileOutputStream(rp.getFD()); // open for append
fc = rp.getChannel();
fc.position(fc.size());
@@ -182,9 +197,9 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
LOG.info("Nothing to flush");
return;
}
preallocate(); // preallocate file if necessay
preallocate(); // preallocate file if necessary
doubleBuf.flushTo(fp);
if (durable && !shouldSkipFsyncForTests) {
if (durable && !shouldSkipFsyncForTests && !shouldSyncWritesAndSkipFsync) {
fc.force(false); // metadata updates not needed
}
}
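Note: the constructor and flushAndSync changes above are two halves of one trade-off: either buffer writes and pay for an explicit FileChannel#force, or open the file in "rws" mode and make every write synchronous. A minimal standalone sketch of the two modes (illustrative only, not part of this commit; file names are hypothetical):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

public class SyncModeSketch {
  public static void main(String[] args) throws IOException {
    byte[] record = "edit-op".getBytes();

    // "rw" (the default path): data may linger in the OS cache until the
    // channel is forced, which is the expensive call this patch makes optional.
    RandomAccessFile rw = new RandomAccessFile(new File("edits-rw.tmp"), "rw");
    FileChannel fc = rw.getChannel();
    fc.write(ByteBuffer.wrap(record));
    fc.force(false); // false: flush file content only, skip metadata
    rw.close();

    // "rws" (the new optional path): the OS is asked to write content and
    // metadata synchronously on every write, so no force() call is needed.
    RandomAccessFile rws = new RandomAccessFile(new File("edits-rws.tmp"), "rws");
    rws.write(record);
    rws.close();
  }
}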

FSEditLog.java

@@ -246,7 +246,7 @@ public class FSEditLog implements LogsPurgeable {
if (u.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
StorageDirectory sd = storage.getStorageDirectory(u);
if (sd != null) {
journalSet.add(new FileJournalManager(sd, storage), required);
journalSet.add(new FileJournalManager(conf, sd, storage), required);
}
} else {
journalSet.add(createJournal(u), required);

FileJournalManager.java

@@ -30,6 +30,7 @@ import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
@@ -57,6 +58,7 @@ import com.google.common.collect.ComparisonChain;
public class FileJournalManager implements JournalManager {
private static final Log LOG = LogFactory.getLog(FileJournalManager.class);
private final Configuration conf;
private final StorageDirectory sd;
private final StorageErrorReporter errorReporter;
private int outputBufferCapacity = 512*1024;
@@ -72,8 +74,9 @@ public class FileJournalManager implements JournalManager {
StoragePurger purger
= new NNStorageRetentionManager.DeletionStoragePurger();
public FileJournalManager(StorageDirectory sd,
public FileJournalManager(Configuration conf, StorageDirectory sd,
StorageErrorReporter errorReporter) {
this.conf = conf;
this.sd = sd;
this.errorReporter = errorReporter;
}
@@ -102,8 +105,8 @@ public class FileJournalManager implements JournalManager {
throws IOException {
try {
currentInProgress = NNStorage.getInProgressEditsFile(sd, txid);
EditLogOutputStream stm = new EditLogFileOutputStream(currentInProgress,
outputBufferCapacity);
EditLogOutputStream stm = new EditLogFileOutputStream(conf,
currentInProgress, outputBufferCapacity);
stm.create();
return stm;
} catch (IOException e) {

BinaryEditsVisitor.java

@@ -22,6 +22,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileOutputStream;
@@ -39,7 +40,8 @@ public class BinaryEditsVisitor implements OfflineEditsVisitor {
* @param filename Name of file to write output to
*/
public BinaryEditsVisitor(String outputName) throws IOException {
this.elfos = new EditLogFileOutputStream(new File(outputName), 0);
this.elfos = new EditLogFileOutputStream(new Configuration(),
new File(outputName), 0);
elfos.create();
}
@@ -64,4 +66,4 @@ public class BinaryEditsVisitor implements OfflineEditsVisitor {
public void visitOp(FSEditLogOp op) throws IOException {
elfos.write(op);
}
}
}

hdfs-default.xml

@@ -1290,4 +1290,21 @@
</description>
</property>
<property>
<name>dfs.namenode.edits.noeditlogchannelflush</name>
<value>false</value>
<description>
Specifies whether to skip flushing the edit log file channel. When set,
expensive FileChannel#force calls are skipped and synchronous disk writes
are enabled instead by opening the edit log file with the
RandomAccessFile("rws") flags. This can significantly improve the
performance of edit log writes on the Windows platform.
Note that the behavior of the "rws" flags is platform- and hardware-specific
and might not provide the same level of guarantees as FileChannel#force.
For example, the write will skip the disk cache on SAS and SCSI devices
while it might not on SATA devices. This is an expert-level setting;
change with caution.
</description>
</property>
</configuration>
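The new key can also be set programmatically; a short usage sketch (illustrative, not part of this commit; the class name is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class EnableSyncWrites {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Equivalent to setting dfs.namenode.edits.noeditlogchannelflush=true
    // in hdfs-site.xml; EditLogFileOutputStream will then open edit log
    // files with "rws" and skip FileChannel#force.
    conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_EDITS_NOEDITLOGCHANNELFLUSH, true);
  }
}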

TestJournal.java

@@ -22,6 +22,7 @@ import static org.junit.Assert.*;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.qjournal.QJMTestUtil;
@@ -53,13 +54,16 @@ public class TestJournal {
private StorageErrorReporter mockErrorReporter = Mockito.mock(
StorageErrorReporter.class);
private Configuration conf;
private Journal journal;
@Before
public void setup() throws Exception {
FileUtil.fullyDelete(TEST_LOG_DIR);
journal = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
conf = new Configuration();
journal = new Journal(conf, TEST_LOG_DIR, JID,
mockErrorReporter);
journal.format(FAKE_NSINFO);
}
@@ -135,7 +139,7 @@ public class TestJournal {
journal.close(); // close to unlock the storage dir
// Now re-instantiate, make sure history is still there
journal = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
journal = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
// The storage info should be read, even if no writer has taken over.
assertEquals(storageString,
@@ -192,7 +196,7 @@ public class TestJournal {
journal.newEpoch(FAKE_NSINFO, 1);
try {
new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
fail("Did not fail to create another journal in same dir");
} catch (IOException ioe) {
GenericTestUtils.assertExceptionContains(
@@ -203,7 +207,7 @@ public class TestJournal {
// Journal should no longer be locked after the close() call.
// Hence, should be able to create a new Journal in the same dir.
Journal journal2 = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
Journal journal2 = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
journal2.newEpoch(FAKE_NSINFO, 2);
journal2.close();
}
@@ -231,7 +235,7 @@ public class TestJournal {
// Check that, even if we re-construct the journal by scanning the
// disk, we don't allow finalizing incorrectly.
journal.close();
journal = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
journal = new Journal(conf, TEST_LOG_DIR, JID, mockErrorReporter);
try {
journal.finalizeLogSegment(makeRI(4), 1, 6);

TestEditLog.java

@@ -762,7 +762,7 @@ public class TestEditLog {
File log = new File(currentDir,
NNStorage.getInProgressEditsFileName(3));
EditLogFileOutputStream stream = new EditLogFileOutputStream(log, 1024);
EditLogFileOutputStream stream = new EditLogFileOutputStream(conf, log, 1024);
try {
stream.create();
if (!inBothDirs) {
@@ -1233,7 +1233,7 @@ public class TestEditLog {
EditLogFileOutputStream elfos = null;
EditLogFileInputStream elfis = null;
try {
elfos = new EditLogFileOutputStream(TEST_LOG_NAME, 0);
elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 0);
elfos.create();
elfos.writeRaw(garbage, 0, garbage.length);
elfos.setReadyToFlush();

TestEditLogFileOutputStream.java

@@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue;
import java.io.File;
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.StringUtils;
import org.junit.After;
import org.junit.Before;
@@ -40,6 +41,8 @@ public class TestEditLogFileOutputStream {
final static int MIN_PREALLOCATION_LENGTH =
EditLogFileOutputStream.MIN_PREALLOCATION_LENGTH;
private Configuration conf;
static {
// No need to fsync for the purposes of tests. This makes
// the tests run much faster.
@@ -52,6 +55,11 @@ public class TestEditLogFileOutputStream {
if (TEST_EDITS.exists()) TEST_EDITS.delete();
}
@Before
public void setUp() {
conf = new Configuration();
}
static void flushAndCheckLength(EditLogFileOutputStream elos,
long expectedLength) throws IOException {
elos.setReadyToFlush();
@@ -66,7 +74,8 @@ public class TestEditLogFileOutputStream {
@Test
public void testRawWrites() throws IOException {
EditLogFileOutputStream elos = new EditLogFileOutputStream(TEST_EDITS, 0);
EditLogFileOutputStream elos = new EditLogFileOutputStream(conf, TEST_EDITS,
0);
try {
byte[] small = new byte[] {1,2,3,4,5,8,7};
elos.create();
@@ -105,7 +114,7 @@ public class TestEditLogFileOutputStream {
public void testEditLogFileOutputStreamCloseAbort() throws IOException {
// abort after a close should just ignore
EditLogFileOutputStream editLogStream =
new EditLogFileOutputStream(TEST_EDITS, 0);
new EditLogFileOutputStream(conf, TEST_EDITS, 0);
editLogStream.close();
editLogStream.abort();
}
@@ -118,7 +127,7 @@ public class TestEditLogFileOutputStream {
public void testEditLogFileOutputStreamCloseClose() throws IOException {
// close after a close should result in an IOE
EditLogFileOutputStream editLogStream =
new EditLogFileOutputStream(TEST_EDITS, 0);
new EditLogFileOutputStream(conf, TEST_EDITS, 0);
editLogStream.close();
try {
editLogStream.close();
@@ -136,7 +145,7 @@ public class TestEditLogFileOutputStream {
public void testEditLogFileOutputStreamAbortAbort() throws IOException {
// abort after an abort should just ignore
EditLogFileOutputStream editLogStream =
new EditLogFileOutputStream(TEST_EDITS, 0);
new EditLogFileOutputStream(conf, TEST_EDITS, 0);
editLogStream.abort();
editLogStream.abort();
}

TestFileJournalManager.java

@@ -36,6 +36,7 @@ import java.util.PriorityQueue;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.JournalManager.CorruptionException;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.TestEditLog.AbortSpec;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Before;
import org.junit.Test;
import com.google.common.base.Joiner;
@@ -52,12 +54,19 @@ import com.google.common.collect.TreeMultiset;
public class TestFileJournalManager {
static final Log LOG = LogFactory.getLog(TestFileJournalManager.class);
private Configuration conf;
static {
// No need to fsync for the purposes of tests. This makes
// the tests run much faster.
EditLogFileOutputStream.setShouldSkipFsyncForTesting(true);
}
@Before
public void setUp() {
conf = new Configuration();
}
/**
* Find out how many transactions we can read from a
* FileJournalManager, starting at a given transaction ID.
@@ -116,7 +125,7 @@ public class TestFileJournalManager {
long numJournals = 0;
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
numJournals++;
}
@@ -136,7 +145,7 @@ public class TestFileJournalManager {
5, new AbortSpec(5, 0));
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL,
getNumberOfTransactions(jm, 1, true, false));
}
@@ -159,16 +168,16 @@ public class TestFileJournalManager {
5, new AbortSpec(5, 1));
Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
StorageDirectory sd = dirs.next();
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
sd = dirs.next();
jm = new FileJournalManager(sd, storage);
jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
true, false));
sd = dirs.next();
jm = new FileJournalManager(sd, storage);
jm = new FileJournalManager(conf, sd, storage);
assertEquals(6*TXNS_PER_ROLL, getNumberOfTransactions(jm, 1, true, false));
}
@@ -192,17 +201,17 @@ public class TestFileJournalManager {
new AbortSpec(5, 2));
Iterator<StorageDirectory> dirs = storage.dirIterator(NameNodeDirType.EDITS);
StorageDirectory sd = dirs.next();
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
true, false));
sd = dirs.next();
jm = new FileJournalManager(sd, storage);
jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
true, false));
sd = dirs.next();
jm = new FileJournalManager(sd, storage);
jm = new FileJournalManager(conf, sd, storage);
assertEquals(5*TXNS_PER_ROLL + TXNS_PER_FAIL, getNumberOfTransactions(jm, 1,
true, false));
}
@@ -227,7 +236,7 @@ public class TestFileJournalManager {
10, new AbortSpec(10, 0));
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
String sdRootPath = sd.getRoot().getAbsolutePath();
FileUtil.chmod(sdRootPath, "-w", true);
try {
@@ -252,7 +261,7 @@ public class TestFileJournalManager {
10, new AbortSpec(10, 0));
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
long expectedTotalTxnCount = TXNS_PER_ROLL*10 + TXNS_PER_FAIL;
assertEquals(expectedTotalTxnCount, getNumberOfTransactions(jm, 1,
true, false));
@@ -278,7 +287,7 @@ public class TestFileJournalManager {
10);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
// 10 rolls, so 11 rolled files, 110 txids total.
final int TOTAL_TXIDS = 10 * 11;
@@ -316,7 +325,7 @@ public class TestFileJournalManager {
assertEquals(1, files.length);
assertTrue(files[0].delete());
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(startGapTxId-1, getNumberOfTransactions(jm, 1, true, true));
assertEquals(0, getNumberOfTransactions(jm, startGapTxId, true, true));
@@ -349,7 +358,7 @@ public class TestFileJournalManager {
corruptAfterStartSegment(files[0]);
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
assertEquals(10*TXNS_PER_ROLL+1,
getNumberOfTransactions(jm, 1, true, false));
}
@@ -364,7 +373,7 @@ public class TestFileJournalManager {
NNStorage.getFinalizedEditsFileName(1001, 1100));
// passing null for NNStorage because this unit test will not use it
FileJournalManager fjm = new FileJournalManager(sd, null);
FileJournalManager fjm = new FileJournalManager(conf, sd, null);
assertEquals("[1,100],[101,200],[1001,1100]", getLogsAsString(fjm, 1));
assertEquals("[101,200],[1001,1100]", getLogsAsString(fjm, 101));
assertEquals("[1001,1100]", getLogsAsString(fjm, 201));
@@ -428,7 +437,7 @@ public class TestFileJournalManager {
10);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
EditLogInputStream elis = getJournalInputStream(jm, 5, true);
FSEditLogOp op = elis.readOp();
@@ -449,7 +458,7 @@ public class TestFileJournalManager {
10, false);
StorageDirectory sd = storage.dirIterator(NameNodeDirType.EDITS).next();
FileJournalManager jm = new FileJournalManager(sd, storage);
FileJournalManager jm = new FileJournalManager(conf, sd, storage);
// If we exclude the in-progress stream, we should only have 100 tx.
assertEquals(100, getNumberOfTransactions(jm, 1, false, false));

TestNNStorageRetentionManager.java

@@ -266,12 +266,12 @@ public class TestNNStorageRetentionManager {
Joiner.on(",").join(purgedPaths));
}
private static class TestCaseDescription {
private class TestCaseDescription {
private Map<File, FakeRoot> dirRoots = Maps.newHashMap();
private Set<File> expectedPurgedLogs = Sets.newLinkedHashSet();
private Set<File> expectedPurgedImages = Sets.newLinkedHashSet();
private static class FakeRoot {
private class FakeRoot {
NameNodeDirType type;
List<File> files;
@@ -331,7 +331,7 @@ public class TestNNStorageRetentionManager {
if (!root.type.isOfType(NameNodeDirType.EDITS)) continue;
// passing null NNStorage for unit test because it does not use it
FileJournalManager fjm = new FileJournalManager(
FileJournalManager fjm = new FileJournalManager(conf,
root.mockStorageDir(), null);
fjm.purger = purger;
jms.add(fjm);

TestNameNodeRecovery.java

@@ -74,7 +74,7 @@ public class TestNameNodeRecovery {
EditLogFileOutputStream elfos = null;
EditLogFileInputStream elfis = null;
try {
elfos = new EditLogFileOutputStream(TEST_LOG_NAME, 0);
elfos = new EditLogFileOutputStream(new Configuration(), TEST_LOG_NAME, 0);
elfos.create();
elts.addTransactionsToLog(elfos, cache);