HADOOP-8158. Interrupting hadoop fs -put from the command line causes a LeaseExpiredException. Contributed by Daryn Sharp. (harsh)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1389010 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Harsh J 2012-09-23 10:50:44 +00:00
parent ac31d6a448
commit 9c9b3f09ed
4 changed files with 46 additions and 4 deletions

View File

@@ -219,6 +219,9 @@ Trunk (Unreleased)
HDFS-3678. Edit log files are never being purged from 2NN. (atm)
HADOOP-8158. Interrupting hadoop fs -put from the command line
causes a LeaseExpiredException. (daryn via harsh)
Release 2.0.3-alpha - Unreleased
INCOMPATIBLE CHANGES

View File

@@ -77,6 +77,7 @@ import javax.net.SocketFactory;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.BlockStorageLocation;
@@ -695,6 +696,17 @@ public class DFSClient implements java.io.Closeable {
}
}
/**
 * Close all open output streams, abandoning all of the leases and files
 * being created.
 *
 * @param abort if {@code true}, abort the streams (abandoning their leases)
 *              instead of closing them gracefully; callers performing a
 *              normal shutdown pass {@code false}
 */
public void closeOutputStreams(boolean abort) {
// Only meaningful while the client is still running; after shutdown there
// are no live streams left to close.
if (clientRunning) {
closeAllFilesBeingWritten(abort);
}
}
/**
* Get the default block size for this cluster
* @return the default block size in bytes

View File

@@ -535,10 +535,10 @@ public class DistributedFileSystem extends FileSystem {
@Override
public void close() throws IOException {
try {
super.processDeleteOnExit();
dfs.close();
} finally {
dfs.closeOutputStreams(false);
super.close();
} finally {
dfs.close();
}
}

View File

@@ -22,7 +22,9 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.inOrder;
import static org.mockito.Mockito.mock;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.net.URI;
@@ -54,6 +56,7 @@ import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;
import org.apache.log4j.Level;
import org.junit.Test;
import org.mockito.InOrder;
public class TestDistributedFileSystem {
private static final Random RAN = new Random();
@@ -127,7 +130,31 @@ public class TestDistributedFileSystem {
if (cluster != null) {cluster.shutdown();}
}
}
@Test
public void testDFSCloseOrdering() throws Exception {
DistributedFileSystem fs = new MyDistributedFileSystem();
Path path = new Path("/a");
fs.deleteOnExit(path);
fs.close();
InOrder inOrder = inOrder(fs.dfs);
inOrder.verify(fs.dfs).closeOutputStreams(eq(false));
inOrder.verify(fs.dfs).delete(eq(path.toString()), eq(true));
inOrder.verify(fs.dfs).close();
}
private static class MyDistributedFileSystem extends DistributedFileSystem {
MyDistributedFileSystem() {
statistics = new FileSystem.Statistics("myhdfs"); // can't mock finals
dfs = mock(DFSClient.class);
}
@Override
public boolean exists(Path p) {
return true; // trick out deleteOnExit
}
}
@Test
public void testDFSSeekExceptions() throws IOException {
Configuration conf = getTestConfiguration();