HDFS-3492. fix some misuses of InputStream#skip. Contributed by Colin Patrick McCabe

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1361450 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 2012-07-14 00:22:55 +00:00
parent bb00c0ce20
commit 03e8648d17
5 changed files with 16 additions and 36 deletions
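Background on the fix: InputStream#skip(n) may skip fewer than n bytes, or even zero, without having reached end of stream, so a single skip() call (with the return value ignored or checked only once) can leave the stream short of the requested offset. The patch replaces such call sites with IOUtils.skipFully, which keeps skipping until the full count is consumed or the stream ends. A rough sketch of that looping behavior, using only standard java.io and not the actual Hadoop implementation:

    import java.io.EOFException;
    import java.io.IOException;
    import java.io.InputStream;

    // Sketch of a skipFully-style helper: loop until 'len' bytes have been
    // skipped, failing if the stream ends first.
    class SkipFullySketch {
      static void skipFully(InputStream in, long len) throws IOException {
        while (len > 0) {
          long skipped = in.skip(len);
          if (skipped <= 0) {
            // skip() may legally return 0 without EOF; probe with read() to
            // tell "no progress yet" apart from "end of stream".
            if (in.read() == -1) {
              throw new EOFException(len + " bytes left to skip, but the stream ended");
            }
            skipped = 1;
          }
          len -= skipped;
        }
      }
    }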


@@ -42,10 +42,7 @@ public class InputStreamEntity implements StreamingOutput {
   @Override
   public void write(OutputStream os) throws IOException {
-    long skipped = is.skip(offset);
-    if (skipped < offset) {
-      throw new IOException("Requested offset beyond stream size");
-    }
+    IOUtils.skipFully(is, offset);
     if (len == -1) {
       IOUtils.copyBytes(is, os, 4096, true);
     } else {


@@ -327,6 +327,9 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-470. libhdfs should handle 0-length reads from FSInputStream
     correctly. (Colin Patrick McCabe via eli)
 
+    HDFS-3492. fix some misuses of InputStream#skip.
+    (Colin Patrick McCabe via eli)
+
   BREAKDOWN OF HDFS-3042 SUBTASKS
 
     HDFS-2185. HDFS portion of ZK-based FailoverController (todd)


@@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader;
 import org.apache.hadoop.hdfs.util.DirectBufferPool;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
@@ -315,23 +316,10 @@ class BlockReaderLocal implements BlockReader {
     boolean success = false;
     try {
       // Skip both input streams to beginning of the chunk containing startOffset
-      long toSkip = firstChunkOffset;
-      while (toSkip > 0) {
-        long skipped = dataIn.skip(toSkip);
-        if (skipped == 0) {
-          throw new IOException("Couldn't initialize input stream");
-        }
-        toSkip -= skipped;
-      }
+      IOUtils.skipFully(dataIn, firstChunkOffset);
       if (checksumIn != null) {
         long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize;
-        while (checkSumOffset > 0) {
-          long skipped = checksumIn.skip(checkSumOffset);
-          if (skipped == 0) {
-            throw new IOException("Couldn't initialize checksum input stream");
-          }
-          checkSumOffset -= skipped;
-        }
+        IOUtils.skipFully(checksumIn, checkSumOffset);
       }
       success = true;
     } finally {
@@ -636,17 +624,9 @@ class BlockReaderLocal implements BlockReader {
     slowReadBuff.position(slowReadBuff.limit());
     checksumBuff.position(checksumBuff.limit());
 
-    long dataSkipped = dataIn.skip(toskip);
-    if (dataSkipped != toskip) {
-      throw new IOException("skip error in data input stream");
-    }
-    long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize;
-    if (checkSumOffset > 0) {
-      long skipped = checksumIn.skip(checkSumOffset);
-      if (skipped != checkSumOffset) {
-        throw new IOException("skip error in checksum input stream");
-      }
-    }
+    IOUtils.skipFully(dataIn, toskip);
+    long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;
+    IOUtils.skipFully(checksumIn, checkSumOffset);
 
     // read into the middle of the chunk
     if (skipBuf == null) {
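A note on the checksum arithmetic in the hunk above: each bytesPerChecksum bytes of block data is covered by one checksumSize-byte checksum, so skipping toskip bytes in the data stream corresponds to skipping (toskip / bytesPerChecksum) * checksumSize bytes in the checksum stream (integer division, i.e. rounded down to a whole chunk). A small worked example with assumed values (512-byte chunks, 4-byte CRC checksums; the real values come from the block's DataChecksum):

    // Hypothetical values for illustration only.
    long bytesPerChecksum = 512;  // data bytes covered by each checksum
    long checksumSize = 4;        // bytes per stored checksum
    long toskip = 1300;           // data bytes to skip

    // 1300 / 512 = 2 whole chunks crossed, so the checksum stream
    // advances by 2 * 4 = 8 bytes.
    long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize;  // 8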


@@ -40,6 +40,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Time;
@@ -95,8 +96,7 @@ public class TestShortCircuitLocalRead {
     // Now read using a different API.
     actual = new byte[expected.length-readOffset];
     stm = fs.open(name);
-    long skipped = stm.skip(readOffset);
-    Assert.assertEquals(skipped, readOffset);
+    IOUtils.skipFully(stm, readOffset);
     //Read a small number of bytes first.
     int nread = stm.read(actual, 0, 3);
     nread += stm.read(actual, nread, 2);
@@ -124,8 +124,7 @@ public class TestShortCircuitLocalRead {
     ByteBuffer actual = ByteBuffer.allocate(expected.length - readOffset);
-    long skipped = stm.skip(readOffset);
-    Assert.assertEquals(skipped, readOffset);
+    IOUtils.skipFully(stm, readOffset);
     actual.limit(3);


@@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
@@ -686,7 +687,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
   public synchronized InputStream getBlockInputStream(ExtendedBlock b,
       long seekOffset) throws IOException {
     InputStream result = getBlockInputStream(b);
-    result.skip(seekOffset);
+    IOUtils.skipFully(result, seekOffset);
     return result;
   }
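For illustration, the difference the helper makes is easy to see with a stream whose skip() deliberately makes only partial progress: a lone skip() call stops short, while a skipFully-style loop (as sketched earlier) reaches the requested offset. Hypothetical demo code, not part of the patch:

    import java.io.ByteArrayInputStream;
    import java.io.IOException;
    import java.io.InputStream;

    public class ShortSkipDemo {
      public static void main(String[] args) throws IOException {
        InputStream in = new ByteArrayInputStream(new byte[64]) {
          @Override
          public long skip(long n) {
            // Deliberately under-skip: at most 2 bytes per call.
            return super.skip(Math.min(n, 2));
          }
        };
        long skipped = in.skip(10);
        // Prints 2: a single skip() call advanced only 2 of the 10 bytes.
        System.out.println("single skip() advanced " + skipped + " bytes");
      }
    }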