diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java index 21c25bd4c51..e5361a80efe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/wsrs/InputStreamEntity.java @@ -42,7 +42,10 @@ public class InputStreamEntity implements StreamingOutput { @Override public void write(OutputStream os) throws IOException { - IOUtils.skipFully(is, offset); + long skipped = is.skip(offset); + if (skipped < offset) { + throw new IOException("Requested offset beyond stream size"); + } if (len == -1) { IOUtils.copyBytes(is, os, 4096, true); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 34c3bc27be0..427cbe35b1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -159,9 +159,6 @@ Release 2.0.1-alpha - UNRELEASED HDFS-3505. DirectoryScanner does not join all threads in shutdown. (Colin Patrick McCabe via eli) - HDFS-3492. Fix some misuses of InputStream#skip (Colin Patrick McCabe - via todd) - HDFS-3485. 
DataTransferThrottler will over-throttle when currentTimeMillis jumps (Andy Isaacson via todd) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java index 53efbf4685b..bd1eeeb92ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java @@ -39,7 +39,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.BlockMetadataHeader; import org.apache.hadoop.hdfs.util.DirectBufferPool; import org.apache.hadoop.ipc.RPC; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.DataChecksum; @@ -285,11 +284,24 @@ class BlockReaderLocal implements BlockReader { //Initially the buffers have nothing to read. 
dataBuff.flip(); checksumBuff.flip(); - IOUtils.skipFully(dataIn, firstChunkOffset); + long toSkip = firstChunkOffset; + while (toSkip > 0) { + long skipped = dataIn.skip(toSkip); + if (skipped == 0) { + throw new IOException("Couldn't initialize input stream"); + } + toSkip -= skipped; + } if (checksumIn != null) { long checkSumOffset = (firstChunkOffset / bytesPerChecksum) * checksumSize; - IOUtils.skipFully(dataIn, checkSumOffset); + while (checkSumOffset > 0) { + long skipped = checksumIn.skip(checkSumOffset); + if (skipped == 0) { + throw new IOException("Couldn't initialize checksum input stream"); + } + checkSumOffset -= skipped; + } } } @@ -395,9 +407,17 @@ class BlockReaderLocal implements BlockReader { dataBuff.clear(); checksumBuff.clear(); - IOUtils.skipFully(dataIn, toskip); - long checkSumOffset = (toskip / bytesPerChecksum) * checksumSize; - IOUtils.skipFully(checksumIn, checkSumOffset); + long dataSkipped = dataIn.skip(toskip); + if (dataSkipped != toskip) { + throw new IOException("skip error in data input stream"); + } + long checkSumOffset = (dataSkipped / bytesPerChecksum) * checksumSize; + if (checkSumOffset > 0) { + long skipped = checksumIn.skip(checkSumOffset); + if (skipped != checkSumOffset) { + throw new IOException("skip error in checksum input stream"); + } + } // read into the middle of the chunk if (skipBuf == null) { @@ -450,4 +470,4 @@ class BlockReaderLocal implements BlockReader { public boolean hasSentStatusCode() { return false; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java index 2aa8f736c16..489f030e13f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -44,7 +44,6 @@ import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.ArrayWritable; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableFactories; @@ -2290,11 +2289,9 @@ public abstract class FSEditLogOp { // 0xff, we want to skip over that region, because there's nothing // interesting there. long numSkip = e.getNumAfterTerminator(); - try { - IOUtils.skipFully(in, numSkip); - } catch (IOException t) { + if (in.skip(numSkip) < numSkip) { FSImage.LOG.error("Failed to skip " + numSkip + " bytes of " + - "garbage after an OP_INVALID. Unexpected early EOF.", t); + "garbage after an OP_INVALID. Unexpected early EOF."); return null; } } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java index 774a10be79c..f4052bb148b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestShortCircuitLocalRead.java @@ -38,7 +38,6 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.junit.Assert; @@ -93,7 +92,8 @@ public class TestShortCircuitLocalRead { // Now read using a different API. 
actual = new byte[expected.length-readOffset]; stm = fs.open(name); - IOUtils.skipFully(stm, readOffset); + long skipped = stm.skip(readOffset); + Assert.assertEquals(skipped, readOffset); //Read a small number of bytes first. int nread = stm.read(actual, 0, 3); nread += stm.read(actual, nread, 2); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java index d4a7b3a2572..e69b1c30214 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java @@ -47,7 +47,6 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.RollingLogs; import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean; import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock; import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.util.DataChecksum; import org.apache.hadoop.util.DiskChecker.DiskErrorException; @@ -687,7 +686,7 @@ public class SimulatedFSDataset implements FsDatasetSpi { public synchronized InputStream getBlockInputStream(ExtendedBlock b, long seekOffset) throws IOException { InputStream result = getBlockInputStream(b); - IOUtils.skipFully(result, seekOffset); + result.skip(seekOffset); return result; }