Revert HADOOP-8491

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1349135 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-06-12 06:23:43 +00:00
parent 4c831ec671
commit 2acc997daa
4 changed files with 6 additions and 85 deletions
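HADOOP-8491, which this commit reverts, added IOUtils helpers that loop until a ByteBuffer is fully drained, because WritableByteChannel#write (and FileChannel#write(ByteBuffer, long)) may write fewer bytes than requested. A minimal sketch of that failure mode, assuming a hypothetical caller (the class and method names below are illustrative, not from the patch):

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.WritableByteChannel;

    // Hypothetical caller, not from the patch. A single write() call is allowed to
    // consume only part of the buffer (a "short write"); whatever is left in buf
    // afterwards never reaches the channel unless the caller keeps writing.
    final class ShortWriteDemo {
      static void writeOnce(WritableByteChannel ch, ByteBuffer buf) throws IOException {
        int written = ch.write(buf);              // may return with buf.remaining() > 0
        if (buf.remaining() > 0) {
          System.err.println("short write: wrote " + written
              + " bytes, " + buf.remaining() + " still pending");
        }
      }
    }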


@@ -80,12 +80,6 @@ Release 2.0.1-alpha - UNRELEASED
     HADOOP-8485. Don't hardcode "Apache Hadoop 0.23" in the docs. (eli)
 
-    HADOOP-8488. test-patch.sh gives +1 even if the native build fails.
-    (Colin Patrick McCabe via eli)
-
-    HADOOP-8491. Check for short writes when using FileChannel#write
-    and related methods. (Colin Patrick McCabe via eli)
-
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HADOOP-8220. ZKFailoverController doesn't handle failure to become active
@@ -118,6 +112,9 @@ Release 2.0.1-alpha - UNRELEASED
     HADOOP-8405. ZKFC tests leak ZK instances. (todd)
 
+    HADOOP-8488. test-patch.sh gives +1 even if the native build
+    fails. (Colin Patrick McCabe via eli)
+
 Release 2.0.0-alpha - 05-23-2012
 
   INCOMPATIBLE CHANGES


@@ -20,9 +20,6 @@ package org.apache.hadoop.io;
 import java.io.*;
 import java.net.Socket;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
-import java.nio.channels.WritableByteChannel;
 
 import org.apache.commons.logging.Log;
@@ -248,35 +245,4 @@ public class IOUtils {
     public void write(int b) throws IOException {
     }
   }
-
-  /**
-   * Write a ByteBuffer to a WritableByteChannel, handling short writes.
-   *
-   * @param bc The WritableByteChannel to write to.
-   * @param buf The input buffer
-   * @throws IOException On I/O error.
-   */
-  public static void writeFully(WritableByteChannel bc, ByteBuffer buf)
-      throws IOException {
-    do {
-      bc.write(buf);
-    } while (buf.remaining() > 0);
-  }
-
-  /**
-   * Write a ByteBuffer to a FileChannel at a given offset,
-   * handling short writes.
-   *
-   * @param fc The FileChannel to write to.
-   * @param buf The input buffer
-   * @param offset The offset in the file to start writing at.
-   * @throws IOException On I/O error.
-   */
-  public static void writeFully(FileChannel fc, ByteBuffer buf,
-      long offset) throws IOException {
-    do {
-      offset += fc.write(buf, offset);
-    } while (buf.remaining() > 0);
-  }
 }
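
For reference, a sketch of how a caller could have used the two overloads removed above; the writeFully calls mirror the removed hunk, while the file name, buffer contents, and surrounding class are made up for illustration:

    import java.io.RandomAccessFile;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    import org.apache.hadoop.io.IOUtils;

    // Hypothetical usage of the removed helpers; "example.bin" and the buffer
    // contents are invented, only IOUtils.writeFully comes from the hunk above.
    public class WriteFullyUsage {
      public static void main(String[] args) throws Exception {
        ByteBuffer buf = ByteBuffer.wrap("hello, short writes".getBytes("UTF-8"));
        RandomAccessFile raf = new RandomAccessFile("example.bin", "rw");
        try {
          FileChannel fc = raf.getChannel();
          IOUtils.writeFully(fc, buf);        // drains buf at the channel's current position
          buf.rewind();
          IOUtils.writeFully(fc, buf, 100L);  // drains buf again, starting at file offset 100
        } finally {
          raf.close();
        }
      }
    }

The positional overload writes at the given file offset without moving the channel's own position, which is why the edit-log preallocation path in the last hunk below passes an explicit offset.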


@@ -21,13 +21,9 @@ package org.apache.hadoop.io;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.fail;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
-import java.io.RandomAccessFile;
-import java.nio.ByteBuffer;
-import java.nio.channels.FileChannel;
 
 import org.junit.Test;
 import org.mockito.Mockito;
@@ -36,8 +32,7 @@ import org.mockito.Mockito;
  * Test cases for IOUtils.java
  */
 public class TestIOUtils {
-  private static final String TEST_FILE_NAME = "test_file";
 
   @Test
   public void testCopyBytesShouldCloseStreamsWhenCloseIsTrue() throws Exception {
     InputStream inputStream = Mockito.mock(InputStream.class);
@@ -115,41 +110,4 @@ public class TestIOUtils {
     Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
   }
 
-  @Test
-  public void testWriteFully() throws IOException {
-    final int INPUT_BUFFER_LEN = 10000;
-    final int HALFWAY = 1 + (INPUT_BUFFER_LEN / 2);
-    byte[] input = new byte[INPUT_BUFFER_LEN];
-    for (int i = 0; i < input.length; i++) {
-      input[i] = (byte)(i & 0xff);
-    }
-    byte[] output = new byte[input.length];
-    try {
-      RandomAccessFile raf = new RandomAccessFile(TEST_FILE_NAME, "rw");
-      FileChannel fc = raf.getChannel();
-      ByteBuffer buf = ByteBuffer.wrap(input);
-      IOUtils.writeFully(fc, buf);
-      raf.seek(0);
-      raf.read(output);
-      for (int i = 0; i < input.length; i++) {
-        assertEquals(input[i], output[i]);
-      }
-      buf.rewind();
-      IOUtils.writeFully(fc, buf, HALFWAY);
-      for (int i = 0; i < HALFWAY; i++) {
-        assertEquals(input[i], output[i]);
-      }
-      raf.seek(0);
-      raf.read(output);
-      for (int i = HALFWAY; i < input.length; i++) {
-        assertEquals(input[i - HALFWAY], output[i]);
-      }
-    } finally {
-      File f = new File(TEST_FILE_NAME);
-      if (f.exists()) {
-        f.delete();
-      }
-    }
-  }
 }


@@ -206,10 +206,10 @@ public class EditLogFileOutputStream extends EditLogOutputStream {
             + fc.size());
       }
       fill.position(0);
-      IOUtils.writeFully(fc, fill, position);
+      int written = fc.write(fill, position);
       if(FSNamesystem.LOG.isDebugEnabled()) {
         FSNamesystem.LOG.debug("Edit log size is now " + fc.size() +
-            " written " + fill.capacity() + " bytes " + " at offset " + position);
+            " written " + written + " bytes " + " at offset " + position);
       }
     }
   }
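
With the revert applied, preallocation is back to a single positional fc.write whose return value is only logged; if that write ever came up short, the rest of the fill buffer would simply not be written. Purely for comparison, a sketch of the loop the removed helper performed, assuming parameters shaped like the fc, fill, and position used in the hunk above:

    import java.io.IOException;
    import java.nio.ByteBuffer;
    import java.nio.channels.FileChannel;

    // Comparison sketch only: the loop the removed IOUtils.writeFully(FileChannel,
    // ByteBuffer, long) performed, versus the single positional write this revert
    // restores. Parameter names mirror the fields used in the hunk above.
    final class PreallocateLoopSketch {
      static void fillFully(FileChannel fc, ByteBuffer fill, long position) throws IOException {
        long pos = position;
        while (fill.remaining() > 0) {
          pos += fc.write(fill, pos);   // a short write advances pos by less; loop again
        }
      }
    }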