HDFS-7988. Replace usage of ExactSizeInputStream with LimitInputStream. Contributed by Walter Su.
parent 70c26703f4
commit 298a8cb096
@@ -162,13 +162,13 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.ShmId;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
-import org.apache.hadoop.hdfs.util.ExactSizeInputStream;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.LimitInputStream;
 
 /**
  * Utilities for converting protobuf classes to and from hdfs-client side
@@ -401,7 +401,7 @@ public class PBHelperClient {
 
     int size = CodedInputStream.readRawVarint32(firstByte, input);
     assert size >= 0;
-    return new ExactSizeInputStream(input, size);
+    return new LimitInputStream(input, size);
   }
 
   public static CipherOption convert(HdfsProtos.CipherOptionProto proto) {
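A minimal sketch of the pattern this hunk changes, not part of the patch itself: a varint32 length prefix is decoded, then the payload stream handed to the protobuf parser is capped at that many bytes. The firstByte handling below is an assumption about the surrounding vintPrefixed() body, which the hunk does not show; the CodedInputStream and LimitInputStream calls mirror the lines above.

import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.util.LimitInputStream;

import com.google.protobuf.CodedInputStream;

class VintPrefixedSketch {
  // Sketch of a varint-delimited read; class and method placement are
  // illustrative, only the two library calls are taken from the diff above.
  static InputStream vintPrefixed(final InputStream input) throws IOException {
    final int firstByte = input.read();   // assumed: prefix byte read by the caller side
    if (firstByte == -1) {
      // Nothing on the wire at all.
      throw new EOFException("Premature EOF: no length prefix available");
    }
    // Decode the remaining bytes of the varint32 length prefix.
    final int size = CodedInputStream.readRawVarint32(firstByte, input);
    assert size >= 0;
    // Cap the returned stream at 'size' bytes. Unlike ExactSizeInputStream,
    // LimitInputStream does not throw on a short payload; a truncated message
    // is detected by the protobuf parser instead.
    return new LimitInputStream(input, size);
  }
}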
@@ -1,127 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.util;
-
-import java.io.EOFException;
-import java.io.FilterInputStream;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-import com.google.common.base.Preconditions;
-
-import javax.annotation.Nonnull;
-
-/**
- * An InputStream implementation which reads from some other InputStream
- * but expects an exact number of bytes. Any attempts to read past the
- * specified number of bytes will return as if the end of the stream
- * was reached. If the end of the underlying stream is reached prior to
- * the specified number of bytes, an EOFException is thrown.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class ExactSizeInputStream extends FilterInputStream {
-  private int remaining;
-
-  /**
-   * Construct an input stream that will read no more than
-   * 'numBytes' bytes.
-   *
-   * If an EOF occurs on the underlying stream before numBytes
-   * bytes have been read, an EOFException will be thrown.
-   *
-   * @param in the inputstream to wrap
-   * @param numBytes the number of bytes to read
-   */
-  public ExactSizeInputStream(InputStream in, int numBytes) {
-    super(in);
-    Preconditions.checkArgument(numBytes >= 0,
-        "Negative expected bytes: ", numBytes);
-    this.remaining = numBytes;
-  }
-
-  @Override
-  public int available() throws IOException {
-    return Math.min(super.available(), remaining);
-  }
-
-  @Override
-  public int read() throws IOException {
-    // EOF if we reached our limit
-    if (remaining <= 0) {
-      return -1;
-    }
-    final int result = super.read();
-    if (result >= 0) {
-      --remaining;
-    } else if (remaining > 0) {
-      // Underlying stream reached EOF but we haven't read the expected
-      // number of bytes.
-      throw new EOFException(
-          "Premature EOF. Expected " + remaining + " more bytes");
-    }
-    return result;
-  }
-
-  @Override
-  public int read(@Nonnull final byte[] b, final int off, int len)
-      throws IOException {
-    if (remaining <= 0) {
-      return -1;
-    }
-    len = Math.min(len, remaining);
-    final int result = super.read(b, off, len);
-    if (result >= 0) {
-      remaining -= result;
-    } else if (remaining > 0) {
-      // Underlying stream reached EOF but we haven't read the expected
-      // number of bytes.
-      throw new EOFException(
-          "Premature EOF. Expected " + remaining + " more bytes");
-    }
-    return result;
-  }
-
-  @Override
-  public long skip(final long n) throws IOException {
-    final long result = super.skip(Math.min(n, remaining));
-    if (result > 0) {
-      remaining -= result;
-    } else if (remaining > 0) {
-      // Underlying stream reached EOF but we haven't read the expected
-      // number of bytes.
-      throw new EOFException(
-          "Premature EOF. Expected " + remaining + " more bytes");
-    }
-    return result;
-  }
-
-  @Override
-  public boolean markSupported() {
-    return false;
-  }
-
-  @Override
-  public void mark(int readlimit) {
-    throw new UnsupportedOperationException();
-  }
-
-}
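For contrast, a small self-contained sketch (not part of this commit) of how the two wrappers differ when the underlying stream ends early. The ExactSizeInputStream behavior follows the class removed above; LimitInputStream is the org.apache.hadoop.util replacement. The class name PrematureEofContrast is made up for illustration, and the ExactSizeInputStream import only resolves against a pre-patch tree.

import java.io.ByteArrayInputStream;
import java.io.EOFException;
import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.hdfs.util.ExactSizeInputStream; // removed by this commit
import org.apache.hadoop.util.LimitInputStream;

public class PrematureEofContrast {
  public static void main(String[] args) throws IOException {
    byte[] twoBytes = "he".getBytes(StandardCharsets.UTF_8); // only 2 bytes available

    // Expect 5 bytes but the underlying stream ends after 2:
    // ExactSizeInputStream throws EOFException on the third read.
    InputStream exact =
        new ExactSizeInputStream(new ByteArrayInputStream(twoBytes), 5);
    exact.read();
    exact.read();
    try {
      exact.read();
    } catch (EOFException e) {
      System.out.println("ExactSizeInputStream: " + e.getMessage());
    }

    // The same scenario with LimitInputStream: the third read simply
    // returns -1, leaving short-read detection to the caller.
    InputStream limited =
        new LimitInputStream(new ByteArrayInputStream(twoBytes), 5);
    limited.read();
    limited.read();
    System.out.println("LimitInputStream read(): " + limited.read()); // prints -1
  }
}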
@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.util;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.fail;
-
-import java.io.ByteArrayInputStream;
-import java.io.EOFException;
-import java.io.IOException;
-import java.io.InputStream;
-
-import org.junit.Test;
-
-public class TestExactSizeInputStream {
-  @Test
-  public void testBasicsReadSingle() throws IOException {
-    ExactSizeInputStream s = new ExactSizeInputStream(byteStream("hello"), 3);
-    assertEquals(3, s.available());
-
-    assertEquals((int)'h', s.read());
-    assertEquals((int)'e', s.read());
-    assertEquals((int)'l', s.read());
-    assertEquals(-1, s.read());
-    assertEquals(0, s.available());
-  }
-
-  @Test
-  public void testBasicsReadArray() throws IOException {
-    ExactSizeInputStream s = new ExactSizeInputStream(byteStream("hello"), 3);
-    assertEquals(3, s.available());
-
-    byte[] buf = new byte[10];
-
-    assertEquals(2, s.read(buf, 0, 2));
-    assertEquals('h', buf[0]);
-    assertEquals('e', buf[1]);
-
-    assertEquals(1, s.read(buf, 0, 2));
-    assertEquals('l', buf[0]);
-
-    assertEquals(-1, s.read(buf, 0, 2));
-  }
-
-  @Test
-  public void testBasicsSkip() throws IOException {
-    ExactSizeInputStream s = new ExactSizeInputStream(byteStream("hello"), 3);
-    assertEquals(3, s.available());
-
-    assertEquals(2, s.skip(2));
-    assertEquals(1, s.skip(2));
-    assertEquals(0, s.skip(2));
-  }
-
-  @Test
-  public void testReadNotEnough() throws IOException {
-    // Ask for 5 bytes, only has 2
-    ExactSizeInputStream s = new ExactSizeInputStream(byteStream("he"), 5);
-    assertEquals(2, s.available());
-
-    assertEquals((int)'h', s.read());
-    assertEquals((int)'e', s.read());
-    try {
-      s.read();
-      fail("Read when should be out of data");
-    } catch (EOFException e) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testSkipNotEnough() throws IOException {
-    // Ask for 5 bytes, only has 2
-    ExactSizeInputStream s = new ExactSizeInputStream(byteStream("he"), 5);
-    assertEquals(2, s.skip(3));
-    try {
-      s.skip(1);
-      fail("Skip when should be out of data");
-    } catch (EOFException e) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testReadArrayNotEnough() throws IOException {
-    // Ask for 5 bytes, only has 2
-    ExactSizeInputStream s = new ExactSizeInputStream(byteStream("he"), 5);
-    byte[] buf = new byte[10];
-    assertEquals(2, s.read(buf, 0, 5));
-    try {
-      s.read(buf, 2, 3);
-      fail("Read buf when should be out of data");
-    } catch (EOFException e) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testMark() throws IOException {
-    ExactSizeInputStream s = new ExactSizeInputStream(byteStream("he"), 5);
-    assertFalse(s.markSupported());
-    try {
-      s.mark(1);
-      fail("Mark should not succeed");
-    } catch (UnsupportedOperationException uoe) {
-      // expected
-    }
-  }
-
-  private static InputStream byteStream(String data) {
-    return new ByteArrayInputStream(data.getBytes());
-  }
-}
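The premature-EOF tests above have no direct equivalent after this commit, since LimitInputStream deliberately never throws. Below is a hedged sketch of how the other half of the deleted coverage, stopping at the limit even though more bytes remain underneath, could be expressed against LimitInputStream; the test class and method names are hypothetical and not part of the patch.

import static org.junit.Assert.assertEquals;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.util.LimitInputStream;

import org.junit.Test;

public class TestLimitInputStreamSketch {
  @Test
  public void testStopsAtLimit() throws IOException {
    // Five bytes underneath, but the wrapper is capped at three.
    LimitInputStream s = new LimitInputStream(byteStream("hello"), 3);
    assertEquals(3, s.available());

    assertEquals((int) 'h', s.read());
    assertEquals((int) 'e', s.read());
    assertEquals((int) 'l', s.read());
    // Past the limit the wrapper reports EOF even though bytes remain below.
    assertEquals(-1, s.read());
    assertEquals(0, s.available());
  }

  private static InputStream byteStream(String data) {
    return new ByteArrayInputStream(data.getBytes());
  }
}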
@@ -1670,6 +1670,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-9153. Pretty-format the output for DFSIO. (Kai Zheng via wheat9)
 
+    HDFS-7988. Replace usage of ExactSizeInputStream with LimitInputStream.
+    (Walter Su via wheat9)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than