HDFS-2132. Potential resource leak in EditLogFileOutputStream.close. (atm)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1145428 13f79535-47bb-0310-9956-ffa450edef68
commit 1ba3ddbe6d (parent 12b8d9823d)
Author: Aaron Myers
Date:   2011-07-12 02:18:30 +00:00

3 changed files with 76 additions and 25 deletions
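Why this counts as a resource leak fix: in the old close(), the two buffers, the file channel, and the file stream are closed one after another, so an IOException thrown early (for example by fc.truncate()) skips every later close() and the fields are never reset. The patch wraps the body in try/finally and hands everything to IOUtils.cleanup. The following is a minimal, self-contained sketch of that pattern only; SafeCloseSketch and its cleanup() helper are invented for illustration and are not part of this commit.

import java.io.Closeable;
import java.io.IOException;

public class SafeCloseSketch {

  // Best-effort close, roughly in the spirit of IOUtils.cleanup():
  // close every non-null resource and report failures instead of rethrowing.
  static void cleanup(Closeable... resources) {
    for (Closeable c : resources) {
      if (c == null) {
        continue;
      }
      try {
        c.close();
      } catch (IOException e) {
        System.err.println("Exception while closing " + c + ": " + e);
      }
    }
  }

  private Closeable first;
  private Closeable second;

  SafeCloseSketch(Closeable first, Closeable second) {
    this.first = first;
    this.second = second;
  }

  // Same shape as the patched close(): the happy path closes and nulls each
  // resource in order, and the finally block guarantees the rest are released
  // even when an earlier close() throws.
  public void close() throws IOException {
    try {
      if (first != null) {
        first.close();      // may throw; without the finally, 'second' leaks
        first = null;
      }
      if (second != null) {
        second.close();
        second = null;
      }
    } finally {
      cleanup(first, second);  // no-op for anything already closed and nulled
      first = null;
      second = null;
    }
  }

  public static void main(String[] args) {
    Closeable failing = () -> { throw new IOException("TESTING: close failed"); };
    final boolean[] secondClosed = { false };
    Closeable tracked = () -> secondClosed[0] = true;

    try {
      new SafeCloseSketch(failing, tracked).close();
    } catch (IOException expected) {
      // the original failure still reaches the caller
    }
    System.out.println("second resource closed: " + secondClosed[0]); // prints true
  }
}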

CHANGES.txt

@@ -818,6 +818,8 @@ Trunk (unreleased changes)
     reading only from a currently being written block. (John George via
     szetszwo)
 
+    HDFS-2132. Potential resource leak in EditLogFileOutputStream.close. (atm)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

EditLogFileOutputStream.java

@@ -28,8 +28,11 @@ import java.util.zip.Checksum;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 
+import com.google.common.annotations.VisibleForTesting;
+
 /**
  * An implementation of the abstract class {@link EditLogOutputStream}, which
  * stores edits in a local file.
@@ -120,32 +123,41 @@ class EditLogFileOutputStream extends EditLogOutputStream {
   @Override
   public void close() throws IOException {
-    // close should have been called after all pending transactions
-    // have been flushed & synced.
-    // if already closed, just skip
-    if(bufCurrent != null)
-    {
-      int bufSize = bufCurrent.size();
-      if (bufSize != 0) {
-        throw new IOException("FSEditStream has " + bufSize
-            + " bytes still to be flushed and cannot " + "be closed.");
-      }
-      bufCurrent.close();
-      bufCurrent = null;
-    }
-
-    if(bufReady != null) {
-      bufReady.close();
-      bufReady = null;
-    }
-
-    // remove the last INVALID marker from transaction log.
-    if (fc != null && fc.isOpen()) {
-      fc.truncate(fc.position());
-      fc.close();
-    }
-    if (fp != null) {
-      fp.close();
+    try {
+      // close should have been called after all pending transactions
+      // have been flushed & synced.
+      // if already closed, just skip
+      if(bufCurrent != null)
+      {
+        int bufSize = bufCurrent.size();
+        if (bufSize != 0) {
+          throw new IOException("FSEditStream has " + bufSize
+              + " bytes still to be flushed and cannot " + "be closed.");
+        }
+        bufCurrent.close();
+        bufCurrent = null;
+      }
+
+      if(bufReady != null) {
+        bufReady.close();
+        bufReady = null;
+      }
+
+      // remove the last INVALID marker from transaction log.
+      if (fc != null && fc.isOpen()) {
+        fc.truncate(fc.position());
+        fc.close();
+        fc = null;
+      }
+      if (fp != null) {
+        fp.close();
+        fp = null;
+      }
+    } finally {
+      IOUtils.cleanup(FSNamesystem.LOG, bufCurrent, bufReady, fc, fp);
+      bufCurrent = bufReady = null;
+      fc = null;
+      fp = null;
     }
   }
@@ -225,4 +237,14 @@ class EditLogFileOutputStream extends EditLogOutputStream {
   File getFile() {
     return file;
   }
+
+  @VisibleForTesting
+  public void setFileChannelForTesting(FileChannel fc) {
+    this.fc = fc;
+  }
+
+  @VisibleForTesting
+  public FileChannel getFileChannelForTesting() {
+    return fc;
+  }
 }
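For reference, IOUtils.cleanup(Log, Closeable...) in hadoop-common closes each non-null stream it is given and logs any IOException instead of rethrowing, which is what makes it safe to call from the finally block above without masking the original error. A small usage sketch follows; CopySketch and its arguments are made up for illustration, and it assumes hadoop-common and commons-logging on the classpath.

import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.IOUtils;

class CopySketch {
  private static final Log LOG = LogFactory.getLog(CopySketch.class);

  // Copy src to dst; whatever happens inside the try block, both streams
  // are closed on the way out, and any close() failure is only logged.
  static void copy(File src, File dst) throws IOException {
    FileInputStream in = null;
    FileOutputStream out = null;
    try {
      in = new FileInputStream(src);
      out = new FileOutputStream(dst);
      IOUtils.copyBytes(in, out, 4096, false);  // false: do not close here
    } finally {
      IOUtils.cleanup(LOG, in, out);
    }
  }
}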

TestEditLogFileOutputStream.java

@@ -20,9 +20,11 @@ package org.apache.hadoop.hdfs.server.namenode;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.channels.FileChannel;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DU;
@@ -31,6 +33,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestEditLogFileOutputStream {
@@ -58,5 +61,29 @@ public class TestEditLogFileOutputStream {
     assertTrue("Edit log disk space used should be at least 257 blocks",
         257 * 4096 <= new DU(editLog, conf).getUsed());
   }
+
+  @Test
+  public void testClose() throws IOException {
+    String errorMessage = "TESTING: fc.truncate() threw IOE";
+    File testDir = new File(System.getProperty("test.build.data", "/tmp"));
+    assertTrue("could not create test directory",
+        testDir.exists() || testDir.mkdirs());
+    File f = new File(testDir, "edits");
+    assertTrue("could not create test file", f.createNewFile());
+    EditLogFileOutputStream elos = new EditLogFileOutputStream(f, 0);
+    FileChannel mockFc = Mockito.spy(elos.getFileChannelForTesting());
+    Mockito.doThrow(new IOException(errorMessage)).when(mockFc)
+        .truncate(Mockito.anyLong());
+    elos.setFileChannelForTesting(mockFc);
+    try {
+      elos.close();
+      fail("elos.close() succeeded, but should have thrown");
+    } catch (IOException e) {
+      assertEquals("wrong IOE thrown from elos.close()",
+          e.getMessage(), errorMessage);
+    }
+    assertEquals("fc was not nulled when elos.close() failed",
+        elos.getFileChannelForTesting(), null);
+  }
 }
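The new test relies on Mockito's partial-mock support: Mockito.spy() wraps the real FileChannel obtained through getFileChannelForTesting(), doThrow(...).when(spy).truncate(anyLong()) makes only that one call fail, and setFileChannelForTesting() injects the spy back into the stream. Below is a standalone sketch of the same spy/doThrow technique on a made-up Sink class (not a Hadoop type), assuming JUnit 4 and Mockito on the classpath.

import static org.junit.Assert.assertEquals;
import static org.junit.Assert.fail;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.junit.Test;
import org.mockito.Mockito;

public class SpyDoThrowSketch {

  // A tiny stand-in for the FileChannel role: one method we will break.
  static class Sink {
    final List<String> written = new ArrayList<String>();
    void write(String s) { written.add(s); }
    void truncate() throws IOException { written.clear(); }
  }

  @Test
  public void testSpyDoThrow() throws IOException {
    // spy() wraps a real instance: unstubbed calls run the real code,
    // while doThrow(...).when(spy).method(...) overrides just one method.
    Sink spy = Mockito.spy(new Sink());
    Mockito.doThrow(new IOException("TESTING: truncate failed"))
        .when(spy).truncate();

    spy.write("a");                       // real behavior is preserved
    assertEquals(1, spy.written.size());

    try {
      spy.truncate();                     // the stubbed call always throws
      fail("truncate() should have thrown");
    } catch (IOException e) {
      assertEquals("TESTING: truncate failed", e.getMessage());
    }
  }
}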