HDFS-7119. Split error checks in AtomicFileOutputStream#close into separate conditions to improve diagnostics. Contributed by Chris Nauroth.
(cherry picked from commit 9f9a2222a2)
parent cb9283549f
commit fd3aa38231
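In short: before this change, a failed delete of the destination and a failed retry rename were folded into one compound condition, so the resulting IOException could not say which step actually went wrong. After the change, each step fails with its own message, and the retried rename goes through NativeIO so the underlying OS error is reported. A condensed before/after sketch of the fallback branch in close(), pieced together from the hunks below (field names as in the patch; not a verbatim copy of the method):

    // Before: one compound check; a delete failure and a rename failure
    // produce the same message.
    if (!origFile.delete() || !tmpFile.renameTo(origFile)) {
      throw new IOException("Could not rename temporary file " +
          tmpFile + " to " + origFile);
    }

    // After: separate checks, and the retry goes through NativeIO, which
    // surfaces the OS error instead of a bare boolean.
    if (origFile.exists() && !origFile.delete()) {
      throw new IOException("Could not delete original file " + origFile);
    }
    try {
      NativeIO.renameTo(tmpFile, origFile);
    } catch (NativeIOException e) {
      throw new IOException("Could not rename temporary file " + tmpFile
          + " to " + origFile + " due to failure in native rename. " + e.toString());
    }

Routing the retry through NativeIO.renameTo is also what makes the "failure in native rename" wording possible, which is exactly what the new Windows-only test at the end of this diff expects.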
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -243,6 +243,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-6808. Add command line option to ask DataNode reload configuration.
     (Lei Xu via Colin Patrick McCabe)
 
+    HDFS-7119. Split error checks in AtomicFileOutputStream#close into separate
+    conditions to improve diagnostics. (cnauroth)
+
   OPTIMIZATIONS
 
     HDFS-6690. Deduplicate xattr names in memory. (wang)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/AtomicFileOutputStream.java
@@ -26,6 +26,8 @@ import java.io.IOException;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
+import org.apache.hadoop.io.nativeio.NativeIOException;
 
 /**
  * A FileOutputStream that has the property that it will only show
@@ -73,9 +75,15 @@ public class AtomicFileOutputStream extends FilterOutputStream {
         boolean renamed = tmpFile.renameTo(origFile);
         if (!renamed) {
           // On windows, renameTo does not replace.
-          if (!origFile.delete() || !tmpFile.renameTo(origFile)) {
-            throw new IOException("Could not rename temporary file " +
-                tmpFile + " to " + origFile);
+          if (origFile.exists() && !origFile.delete()) {
+            throw new IOException("Could not delete original file " + origFile);
+          }
+          try {
+            NativeIO.renameTo(tmpFile, origFile);
+          } catch (NativeIOException e) {
+            throw new IOException("Could not rename temporary file " + tmpFile
+              + " to " + origFile + " due to failure in native rename. "
+              + e.toString());
           }
         }
       } else {
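For context, a minimal usage sketch (not part of the patch) of the path this change affects: AtomicFileOutputStream writes to a temporary file and only replaces the destination when close() succeeds, so the split checks above decide which message a caller sees when that last step fails. The class name, destination argument, and payload below are illustrative; the write-then-close-then-cleanup pattern mirrors the test added further down.

    import java.io.File;
    import java.io.IOException;
    import java.io.OutputStream;

    import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
    import org.apache.hadoop.io.IOUtils;

    public class AtomicWriteExample {                 // illustrative, not from the patch
      public static void main(String[] args) throws IOException {
        File dst = new File(args[0]);                 // destination chosen by the caller
        OutputStream out = null;
        try {
          out = new AtomicFileOutputStream(dst);
          out.write("hello world".getBytes());
          // close() flushes and renames the temporary file over dst; with this
          // patch, a failed delete of dst and a failed native rename raise
          // IOExceptions with distinct messages.
          out.close();
          out = null;
        } finally {
          // Quietly close the stream if anything failed before close() completed.
          IOUtils.cleanup(null, out);
        }
      }
    }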
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
@@ -21,6 +21,7 @@ import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -30,9 +31,13 @@ import java.io.OutputStream;
 
+import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
+import org.apache.hadoop.util.Shell;
 import org.junit.Before;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 import com.google.common.base.Joiner;
 
@@ -45,6 +50,9 @@ public class TestAtomicFileOutputStream {
 
   private static final File DST_FILE = new File(TEST_DIR, "test.txt");
 
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
   @Before
   public void cleanupTestDir() throws IOException {
     assertTrue(TEST_DIR.exists() || TEST_DIR.mkdirs());
@@ -119,6 +127,27 @@ public class TestAtomicFileOutputStream {
         DST_FILE.getName(), Joiner.on(",").join(TEST_DIR.list()));
   }
 
+  @Test
+  public void testFailToRename() throws IOException {
+    assumeTrue(Shell.WINDOWS);
+    OutputStream fos = null;
+    try {
+      fos = new AtomicFileOutputStream(DST_FILE);
+      fos.write(TEST_STRING.getBytes());
+      FileUtil.setWritable(TEST_DIR, false);
+      exception.expect(IOException.class);
+      exception.expectMessage("failure in native rename");
+      try {
+        fos.close();
+      } finally {
+        fos = null;
+      }
+    } finally {
+      IOUtils.cleanup(null, fos);
+      FileUtil.setWritable(TEST_DIR, true);
+    }
+  }
+
   /**
    * Create a stream that fails to flush at close time
    */