HDFS-3531. EditLogFileOutputStream#preallocate should check for incomplete writes. Contributed by Colin Patrick McCabe
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1349616 13f79535-47bb-0310-9956-ffa450edef68
parent 5a2e0ee4d2
commit 07b8584431
org/apache/hadoop/io/IOUtils.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.io;
 import java.io.*;
 import java.net.Socket;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
+import java.nio.channels.WritableByteChannel;
 
 import org.apache.commons.logging.Log;
 
@@ -245,4 +248,34 @@ public class IOUtils {
     public void write(int b) throws IOException {
     }
   }
+
+  /**
+   * Write a ByteBuffer to a WritableByteChannel, handling short writes.
+   *
+   * @param bc               The WritableByteChannel to write to
+   * @param buf              The input buffer
+   * @throws IOException     On I/O error
+   */
+  public static void writeFully(WritableByteChannel bc, ByteBuffer buf)
+      throws IOException {
+    do {
+      bc.write(buf);
+    } while (buf.remaining() > 0);
+  }
+
+  /**
+   * Write a ByteBuffer to a FileChannel at a given offset,
+   * handling short writes.
+   *
+   * @param fc               The FileChannel to write to
+   * @param buf              The input buffer
+   * @param offset           The offset in the file to start writing at
+   * @throws IOException     On I/O error
+   */
+  public static void writeFully(FileChannel fc, ByteBuffer buf,
+      long offset) throws IOException {
+    do {
+      offset += fc.write(buf, offset);
+    } while (buf.remaining() > 0);
+  }
 }
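The two overloads added above differ in how they track progress: the WritableByteChannel version lets the buffer's position record what has already been written, while the FileChannel version issues positional writes, which do not move the channel's own position, and instead advances the supplied offset by each call's return value. A minimal usage sketch of the new helpers, assuming a hypothetical scratch-file path:

import java.io.RandomAccessFile;
import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;

import org.apache.hadoop.io.IOUtils;

public class WriteFullyExample {
  public static void main(String[] args) throws Exception {
    // Hypothetical scratch file, used only for this sketch.
    RandomAccessFile raf = new RandomAccessFile("/tmp/writefully-demo", "rw");
    try {
      FileChannel fc = raf.getChannel();

      ByteBuffer buf = ByteBuffer.wrap("hello, short writes".getBytes("UTF-8"));
      // Channel-style overload: loops until the buffer is drained,
      // letting the buffer position track how much has been written.
      IOUtils.writeFully(fc, buf);

      // Positional overload: writes the same bytes again at offset 1024,
      // advancing only the supplied offset, not the channel position.
      buf.rewind();
      IOUtils.writeFully(fc, buf, 1024L);
    } finally {
      raf.close();
    }
  }
}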
org/apache/hadoop/io/TestIOUtils.java

@@ -21,9 +21,13 @@ package org.apache.hadoop.io;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
+import java.io.RandomAccessFile;
+import java.nio.ByteBuffer;
+import java.nio.channels.FileChannel;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -32,6 +36,7 @@ import org.mockito.Mockito;
  * Test cases for IOUtils.java
  */
 public class TestIOUtils {
+  private static final String TEST_FILE_NAME = "test_file";
 
   @Test
   public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
@@ -110,4 +115,41 @@ public class TestIOUtils {
     Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
   }
 
+  @Test
+  public void testWriteFully() throws IOException {
+    final int INPUT_BUFFER_LEN = 10000;
+    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
+    byte[] input = new byte[INPUT_BUFFER_LEN];
+    for (int i = 0; i < input.length; i++) {
+      input[i] = (byte)(i & 0xff);
+    }
+    byte[] output = new byte[input.length];
+
+    try {
+      RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
+      FileChannel fc = raf.getChannel();
+      ByteBuffer buf = ByteBuffer.wrap(input);
+      IOUtils.writeFully(fc, buf);
+      raf.seek(0);
+      raf.read(output);
+      for (int i = 0; i < input.length; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      buf.rewind();
+      IOUtils.writeFully(fc, buf, HALFWAY);
+      for (int i = 0; i < HALFWAY; i++) {
+        assertEquals(input[i], output[i]);
+      }
+      raf.seek(0);
+      raf.read(output);
+      for (int i = HALFWAY; i < input.length; i++) {
+        assertEquals(input[i - HALFWAY], output[i]);
+      }
+    } finally {
+      File f = new File(TEST_FILE_NAME);
+      if (f.exists()) {
+        f.delete();
+      }
+    }
+  }
 }
CHANGES.txt

@@ -328,6 +328,9 @@ Branch-2 ( Unreleased changes )
     HDFS-3522. If a namenode is in safemode, it should throw SafeModeException
     when getBlockLocations has zero locations. (Brandon Li via szetszwo)
 
+    HDFS-3531. EditLogFileOutputStream#preallocate should check for
+    incomplete writes. (Colin Patrick McCabe via eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)
EditLogFileOutputStream.java

@@ -206,10 +206,10 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
             + fc.size());
       }
       fill.position(0);
-      int written = fc.write(fill, position);
+      IOUtils.writeFully(fc, fill, position);
       if(FSNamesystem.LOG.isDebugEnabled()) {
         FSNamesystem.LOG.debug("Edit log size is now " + fc.size() +
-            " written " + written + " bytes " + " at offset " + position);
+            " written " + fill.capacity() + " bytes " + " at offset " + position);
       }
     }
   }
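The replaced line is the heart of the fix: FileChannel#write(ByteBuffer, long) is allowed to return after writing only part of the buffer, so a single call could leave part of the preallocated region unwritten; the debug message now reports fill.capacity() because the writeFully loop guarantees the whole buffer goes out. A small self-contained sketch, using a toy channel rather than Hadoop code, that makes the short-write behavior visible:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;

public class ShortWriteDemo {
  /** Toy channel that accepts at most 4 bytes per call, mimicking a short write. */
  static class ShortWriteChannel implements WritableByteChannel {
    @Override
    public int write(ByteBuffer src) {
      int n = Math.min(4, src.remaining());
      src.position(src.position() + n);   // pretend n bytes were consumed
      return n;
    }
    @Override public boolean isOpen() { return true; }
    @Override public void close() { }
  }

  public static void main(String[] args) throws IOException {
    ByteBuffer buf = ByteBuffer.wrap(new byte[100]);
    WritableByteChannel ch = new ShortWriteChannel();

    int written = ch.write(buf);
    // A single write() may legally stop early; here 96 bytes are still buffered.
    System.out.println("one write(): " + written + " bytes, "
        + buf.remaining() + " still buffered");

    // Looping until the buffer drains is exactly what IOUtils.writeFully does.
    while (buf.remaining() > 0) {
      ch.write(buf);
    }
    System.out.println("after loop: " + buf.remaining() + " bytes remaining");
  }
}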