HDFS-4572. Fix TestJournal failures on Windows. Contributed by Arpit Agarwal.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1455360 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2013-03-11 23:13:55 +00:00
parent 9d62cdd680
commit 9daa958c5a
4 changed files with 22 additions and 17 deletions


@@ -325,6 +325,8 @@ Trunk (Unreleased)
HDFS-4573. Fix TestINodeFile on Windows. (Arpit Agarwal via suresh)
HDFS-4572. Fix TestJournal failures on Windows. (Arpit Agarwal via suresh)
Release 2.0.5-beta - UNRELEASED
INCOMPATIBLE CHANGES


@@ -215,8 +215,8 @@ void format(NamespaceInfo nsInfo) throws IOException {
@Override // Closeable
public void close() throws IOException {
storage.close();
IOUtils.closeStream(committedTxnId);
IOUtils.closeStream(curSegment);
}
JNStorage getStorage() {
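
Note on the Journal#close() hunk above: on Windows a file that still has an open stream cannot be deleted or renamed, so leaving curSegment open would make a later format() of the journal storage directory fail. A minimal standalone sketch of that behavior (the class and file names below are illustrative, not part of the patch):

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;

// Sketch only: shows why open streams must be closed before their files
// can be deleted on Windows.
public class OpenHandleSketch {
  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("edits", ".tmp");
    FileOutputStream out = new FileOutputStream(f);
    out.write(42);

    // Typically returns false on Windows while the stream is open;
    // on POSIX systems the delete usually succeeds here.
    System.out.println("delete while open:  " + f.delete());

    out.close();
    // After the handle is released, Windows allows the delete.
    // (On POSIX the file may already be gone from the first call.)
    System.out.println("delete after close: " + f.delete());
  }
}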


@@ -33,6 +33,7 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LayoutVersion;
import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
@@ -663,7 +664,9 @@ FileLock tryLock() throws IOException {
file.write(jvmName.getBytes(Charsets.UTF_8));
LOG.info("Lock on " + lockF + " acquired by nodename " + jvmName);
} catch(OverlappingFileLockException oe) {
LOG.error("It appears that another namenode " + file.readLine()
// Cannot read from the locked file on Windows.
String lockingJvmName = Path.WINDOWS ? "" : (" " + file.readLine());
LOG.error("It appears that another namenode" + lockingJvmName
+ " has already locked the storage directory");
file.close();
return null;
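
Note on the Storage#tryLock() hunk above: the old code always called file.readLine() inside the OverlappingFileLockException handler to report which JVM holds the lock, but on Windows the locked file cannot be read, so that call itself fails; the patch skips the read when Path.WINDOWS is true. A rough self-contained sketch of the same guard (the WINDOWS constant stands in for org.apache.hadoop.fs.Path.WINDOWS and the method is hypothetical, not the Hadoop implementation):

import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;
import java.nio.channels.OverlappingFileLockException;

// Sketch only, not the Hadoop implementation.
public class LockHolderSketch {
  static final boolean WINDOWS =
      System.getProperty("os.name").startsWith("Windows");

  static String describeLockHolder(RandomAccessFile file) throws IOException {
    try {
      FileLock lock = file.getChannel().tryLock();
      if (lock != null) {
        lock.release();                 // nothing held it; drop our probe lock
        return "not locked";
      }
      return "locked by another process";
    } catch (OverlappingFileLockException oe) {
      // Another channel in this JVM already holds the lock. Windows enforces
      // the lock on reads as well, so only read the holder's name elsewhere.
      String holder = WINDOWS ? "" : (" by " + file.readLine());
      return "locked within this JVM" + holder;
    }
  }
}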


@@ -36,10 +36,7 @@
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.After;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
import org.junit.*;
import org.mockito.Mockito;
public class TestJournal {
@@ -77,7 +74,7 @@ public void cleanup() {
IOUtils.closeStream(journal);
}
@Test
@Test (timeout = 10000)
public void testEpochHandling() throws Exception {
assertEquals(0, journal.getLastPromisedEpoch());
NewEpochResponseProto newEpoch =
@@ -110,7 +107,7 @@ public void testEpochHandling() throws Exception {
}
}
@Test
@Test (timeout = 10000)
public void testMaintainCommittedTxId() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1);
@@ -125,7 +122,7 @@ public void testMaintainCommittedTxId() throws Exception {
assertEquals(3, journal.getCommittedTxnIdForTests());
}
@Test
@Test (timeout = 10000)
public void testRestartJournal() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1);
@@ -149,7 +146,7 @@ public void testRestartJournal() throws Exception {
assertEquals(1, newEpoch.getLastSegmentTxId());
}
@Test
@Test (timeout = 10000)
public void testFormatResetsCachedValues() throws Exception {
journal.newEpoch(FAKE_NSINFO, 12345L);
journal.startLogSegment(new RequestInfo(JID, 12345L, 1L, 0L), 1L);
@@ -158,6 +155,8 @@ public void testFormatResetsCachedValues() throws Exception {
assertEquals(12345L, journal.getLastWriterEpoch());
assertTrue(journal.isFormatted());
// Close the journal in preparation for reformatting it.
journal.close();
journal.format(FAKE_NSINFO_2);
assertEquals(0, journal.getLastPromisedEpoch());
@@ -170,7 +169,7 @@ public void testFormatResetsCachedValues() throws Exception {
* before any transactions are written, that the next newEpoch() call
* returns the prior segment txid as its most recent segment.
*/
@Test
@Test (timeout = 10000)
public void testNewEpochAtBeginningOfSegment() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1);
@@ -182,7 +181,7 @@ public void testNewEpochAtBeginningOfSegment() throws Exception {
assertEquals(1, resp.getLastSegmentTxId());
}
@Test
@Test (timeout = 10000)
public void testJournalLocking() throws Exception {
Assume.assumeTrue(journal.getStorage().getStorageDir(0).isLockSupported());
StorageDirectory sd = journal.getStorage().getStorageDir(0);
@@ -206,13 +205,14 @@ public void testJournalLocking() throws Exception {
// Hence, should be able to create a new Journal in the same dir.
Journal journal2 = new Journal(TEST_LOG_DIR, JID, mockErrorReporter);
journal2.newEpoch(FAKE_NSINFO, 2);
journal2.close();
}
/**
* Test finalizing a segment after some batch of edits were missed.
* This should fail, since we validate the log before finalization.
*/
@Test
@Test (timeout = 10000)
public void testFinalizeWhenEditsAreMissed() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
journal.startLogSegment(makeRI(1), 1);
@@ -246,7 +246,7 @@ public void testFinalizeWhenEditsAreMissed() throws Exception {
* Ensure that finalizing a segment which doesn't exist throws the
* appropriate exception.
*/
@Test
@Test (timeout = 10000)
public void testFinalizeMissingSegment() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
try {
@@ -267,7 +267,7 @@ public void testFinalizeMissingSegment() throws Exception {
* Eventually, the connection comes back, and the NN tries to start a new
* segment at a higher txid. This should abort the old one and succeed.
*/
@Test
@Test (timeout = 10000)
public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
@@ -296,7 +296,7 @@ public void testAbortOldSegmentIfFinalizeIsMissed() throws Exception {
* Test behavior of startLogSegment() when a segment with the
* same transaction ID already exists.
*/
@Test
@Test (timeout = 10000)
public void testStartLogSegmentWhenAlreadyExists() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
@@ -345,7 +345,7 @@ private static RequestInfo makeRI(int serial) {
return new RequestInfo(JID, 1, serial, 0);
}
@Test
@Test (timeout = 10000)
public void testNamespaceVerification() throws Exception {
journal.newEpoch(FAKE_NSINFO, 1);
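
The TestJournal changes above follow two recurring patterns: every @Test gains a timeout so a Windows-specific hang fails that test after 10 seconds rather than stalling the run, and journals are closed explicitly (journal.close() before reformatting, journal2.close() at the end of testJournalLocking) so their file handles are released before the storage directories are reused or deleted. A small self-contained illustration of both patterns (the JUnit 4 test class below is hypothetical, not part of the patch):

import static org.junit.Assert.assertTrue;

import java.io.File;
import java.io.RandomAccessFile;

import org.junit.After;
import org.junit.Test;

// Hypothetical test class illustrating the two patterns; not part of the patch.
public class WindowsFriendlyTest {

  private RandomAccessFile handle;

  @After
  public void cleanup() throws Exception {
    if (handle != null) {
      handle.close();               // release the handle even if the test failed
      handle = null;
    }
  }

  // JUnit fails the test if it runs longer than 10 seconds, so a hang
  // surfaces as a test failure instead of a stuck build.
  @Test(timeout = 10000)
  public void testDeleteAfterClose() throws Exception {
    File f = File.createTempFile("journal", ".dat");
    handle = new RandomAccessFile(f, "rw");
    handle.writeLong(1L);

    handle.close();                 // mirrors closing the journal before format()
    handle = null;

    // On Windows this delete would fail if the handle were still open.
    assertTrue(f.delete());
  }
}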