HDFS-4725. Merge r1470771 from trunk

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1486144 13f79535-47bb-0310-9956-ffa450edef68
Author: Suresh Srinivas   Date: 2013-05-24 18:15:42 +00:00
parent 57cf1e8fbf
commit 4a16d5b759
8 changed files with 146 additions and 112 deletions


@@ -276,6 +276,9 @@ Release 2.0.5-beta - UNRELEASED
     HDFS-4693. Some test cases in TestCheckpoint do not clean up after
     themselves. (Arpit Agarwal, suresh via suresh)
 
+    HDFS-4725. Fix HDFS file handle leaks in FSEditLog, NameNode,
+    OfflineEditsBinaryLoader and some tests. (Chris Nauroth via szetszwo)
+
   BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
 
     HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
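Every change in this commit applies the same idiom: do the work in a try block and release the handle in a finally block, using org.apache.hadoop.io.IOUtils.cleanup() where a failure during close() should only be logged. As a point of reference, a minimal sketch of that idiom, not taken from the patch (the ReadExample class and its readAll() helper are made up for illustration; it assumes hadoop-common and commons-logging on the classpath):

    import java.io.DataInputStream;
    import java.io.FileInputStream;
    import java.io.IOException;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.io.IOUtils;

    public class ReadExample {
      private static final Log LOG = LogFactory.getLog(ReadExample.class);

      /** Read 'length' bytes from a file, closing the stream on every path. */
      public static byte[] readAll(String path, int length) throws IOException {
        DataInputStream in = new DataInputStream(new FileInputStream(path));
        byte[] content = new byte[length];
        try {
          in.readFully(content);      // may throw; the stream is still closed below
        } finally {
          IOUtils.cleanup(LOG, in);   // closes quietly, logging any IOException
        }
        return content;
      }
    }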


@@ -297,12 +297,14 @@ public class FSEditLog implements LogsPurgeable {
       LOG.debug("Closing log when already closed");
       return;
     }
+    try {
       if (state == State.IN_SEGMENT) {
         assert editLogStream != null;
         waitForSyncToFinish();
         endCurrentLogSegment(true);
       }
+    } finally {
       if (journalSet != null && !journalSet.isEmpty()) {
         try {
           journalSet.close();
@@ -310,9 +312,9 @@ public class FSEditLog implements LogsPurgeable {
           LOG.warn("Error closing journalSet", ioe);
         }
       }
       state = State.CLOSED;
     }
+  }
 
   /**
@@ -563,6 +565,7 @@ public class FSEditLog implements LogsPurgeable {
             "due to " + e.getMessage() + ". " +
             "Unsynced transactions: " + (txid - synctxid);
         LOG.fatal(msg, new Exception());
+        IOUtils.cleanup(LOG, journalSet);
         terminate(1, msg);
       }
     } finally {
@@ -586,6 +589,7 @@ public class FSEditLog implements LogsPurgeable {
           "Could not sync enough journals to persistent storage. "
           + "Unsynced transactions: " + (txid - synctxid);
       LOG.fatal(msg, new Exception());
+      IOUtils.cleanup(LOG, journalSet);
       terminate(1, msg);
     }
   }
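In the two logSync() failure paths above, IOUtils.cleanup(LOG, journalSet) is placed before terminate() because terminate() ends the JVM and never returns, so no enclosing finally block would get a chance to close the journal files. A rough, hypothetical illustration of that ordering, not part of the patch (FatalExit and abort() are invented names; System.exit() stands in for ExitUtil.terminate()):

    import java.io.Closeable;

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;
    import org.apache.hadoop.io.IOUtils;

    public class FatalExit {
      private static final Log LOG = LogFactory.getLog(FatalExit.class);

      /** Log a fatal error, release the handle, then end the process. */
      static void abort(String msg, Closeable resource) {
        LOG.fatal(msg, new Exception());
        // Exiting never returns, so no finally block around the caller would run;
        // close the handle explicitly before leaving.
        IOUtils.cleanup(LOG, resource);
        System.exit(1);
      }
    }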


@@ -653,7 +653,7 @@ public class NameNode {
       }
     } catch (ServiceFailedException e) {
       LOG.warn("Encountered exception while exiting state ", e);
-    }
+    } finally {
       stopCommonServices();
       if (metrics != null) {
         metrics.shutdown();
@@ -662,6 +662,7 @@ public class NameNode {
         namesystem.shutdown();
       }
     }
+  }
 
   synchronized boolean isStopRequested() {
     return stopRequested;
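The new finally block in NameNode.stop() ensures that a ServiceFailedException thrown while leaving the HA state cannot skip stopping the common services, the metrics system, or the namesystem. A bare-bones, hypothetical skeleton of that shape (ServiceStopper and its method names are invented; it only mirrors the control flow):

    public class ServiceStopper {
      public void stop() {
        try {
          leaveActiveState();        // may throw, like state.exitState(haContext)
        } catch (RuntimeException e) {
          System.err.println("Encountered exception while exiting state: " + e);
        } finally {
          // Runs on every path, so threads and file handles are released even
          // when leaving the active state fails.
          stopCommonServices();
          shutdownMetrics();
          shutdownNamesystem();
        }
      }

      void leaveActiveState()   { /* placeholder */ }
      void stopCommonServices() { /* placeholder */ }
      void shutdownMetrics()    { /* placeholder */ }
      void shutdownNamesystem() { /* placeholder */ }
    }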


@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
 import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
+import org.apache.hadoop.io.IOUtils;
 
 /**
  * OfflineEditsBinaryLoader loads edits from a binary edits file
@@ -59,6 +60,7 @@ class OfflineEditsBinaryLoader implements OfflineEditsLoader {
    */
   @Override
   public void loadEdits() throws IOException {
+    try {
       visitor.start(inputStream.getVersion());
       while (true) {
         try {
@@ -79,7 +81,8 @@ class OfflineEditsBinaryLoader implements OfflineEditsLoader {
         } catch (IOException e) {
           if (!recoveryMode) {
             // Tell the visitor to clean up, then re-throw the exception
-            LOG.error("Got IOException at position " + inputStream.getPosition());
+            LOG.error("Got IOException at position " +
+                inputStream.getPosition());
             visitor.close(e);
             throw e;
           }
@@ -88,7 +91,8 @@ class OfflineEditsBinaryLoader implements OfflineEditsLoader {
         } catch (RuntimeException e) {
           if (!recoveryMode) {
             // Tell the visitor to clean up, then re-throw the exception
-            LOG.error("Got RuntimeException at position " + inputStream.getPosition());
+            LOG.error("Got RuntimeException at position " +
+                inputStream.getPosition());
             visitor.close(e);
             throw e;
           }
@@ -97,5 +101,8 @@ class OfflineEditsBinaryLoader implements OfflineEditsLoader {
         }
       }
       visitor.close(null);
+    } finally {
+      IOUtils.cleanup(LOG, inputStream);
+    }
   }
 }


@@ -48,6 +48,8 @@ import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -92,6 +94,8 @@ import com.google.common.base.Joiner;
 
 /** Utilities for HDFS tests */
 public class DFSTestUtil {
+  private static final Log LOG = LogFactory.getLog(DFSTestUtil.class);
+
   private static Random gen = new Random();
   private static String[] dirNames = {
     "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
@@ -710,7 +714,11 @@ public class DFSTestUtil {
     File file = new File(filename);
     DataInputStream in = new DataInputStream(new FileInputStream(file));
     byte[] content = new byte[(int)file.length()];
+    try {
       in.readFully(content);
+    } finally {
+      IOUtils.cleanup(LOG, in);
+    }
     return content;
   }


@@ -634,6 +634,7 @@ public class TestDistributedFileSystem {
         true);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(2).build();
+    try {
       DistributedFileSystem fs = cluster.getFileSystem();
       // Create two files
       Path tmpFile1 = new Path("/tmpfile1.dat");
@@ -673,6 +674,9 @@ public class TestDistributedFileSystem {
               id.isValid());
         }
       }
+    } finally {
+      cluster.shutdown();
+    }
   }
 
   /**
@@ -686,6 +690,7 @@ public class TestDistributedFileSystem {
         true);
     final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
         .numDataNodes(2).build();
+    try {
       cluster.getDataNodes();
       DistributedFileSystem fs = cluster.getFileSystem();
       // Create a file
@@ -708,6 +713,9 @@ public class TestDistributedFileSystem {
         assertTrue("Expected one valid and one invalid replica",
             (l.getVolumeIds()[0].isValid()) ^ (l.getVolumeIds()[1].isValid()));
       }
+    } finally {
+      cluster.shutdown();
+    }
   }
 
   @Test
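The test changes here, and the TestStartup change further down, share one shape: build the MiniDFSCluster before the try block and shut it down in a finally block, so a failed assertion cannot leak the cluster's NameNode and DataNode threads and their open files. A bare-bones sketch of a test written that way (the class and test names are made up; it assumes JUnit 4 and the hadoop-hdfs test jar on the classpath):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.junit.Test;

    public class TestClusterCleanupExample {
      @Test
      public void testWithCluster() throws Exception {
        Configuration conf = new Configuration();
        final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
            .numDataNodes(2).build();
        try {
          cluster.waitActive();
          // ... exercise the file system here ...
        } finally {
          cluster.shutdown();    // always release cluster threads and handles
        }
      }
    }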


@@ -255,7 +255,6 @@ public class TestEditLogJournalFailures {
       doThrow(new IOException("fail on setReadyToFlush()")).when(spyElos)
           .setReadyToFlush();
     }
-    doNothing().when(spyElos).abort();
   }
 
   private EditLogFileOutputStream spyOnStream(JournalAndStream jas) {


@@ -545,7 +545,11 @@ public class TestStartup {
         .manageDataDfsDirs(false)
         .manageNameDfsDirs(false)
         .build();
+    try {
       cluster.waitActive();
+    } finally {
+      cluster.shutdown();
+    }
   }
 
   /**