diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cd5e32b2be4..8863aaa7bc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -79,6 +79,8 @@ Release 2.0.0 - UNRELEASED
 
     HDFS-3004. Implement Recovery Mode. (Colin Patrick McCabe via eli)
 
+    HDFS-3282. Add HdfsDataInputStream as a public API. (umamahesh)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index c28e3a3719d..6ec29c7890f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -80,7 +80,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
@@ -93,7 +92,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1849,41 +1848,13 @@ public class DFSClient implements java.io.Closeable {
   }
 
   /**
-   * The Hdfs implementation of {@link FSDataInputStream}
+   * @deprecated use {@link HdfsDataInputStream} instead.
    */
-  @InterfaceAudience.Private
-  public static class DFSDataInputStream extends FSDataInputStream {
-    public DFSDataInputStream(DFSInputStream in)
-      throws IOException {
-      super(in);
-    }
-
-    /**
-     * Returns the datanode from which the stream is currently reading.
-     */
-    public DatanodeInfo getCurrentDatanode() {
-      return ((DFSInputStream)in).getCurrentDatanode();
-    }
-
-    /**
-     * Returns the block containing the target position.
-     */
-    public ExtendedBlock getCurrentBlock() {
-      return ((DFSInputStream)in).getCurrentBlock();
-    }
+  @Deprecated
+  public static class DFSDataInputStream extends HdfsDataInputStream {
 
-    /**
-     * Return collection of blocks that has already been located.
-     */
-    synchronized List<LocatedBlock> getAllBlocks() throws IOException {
-      return ((DFSInputStream)in).getAllBlocks();
-    }
-
-    /**
-     * @return The visible length of the file.
-     */
-    public long getVisibleLength() throws IOException {
-      return ((DFSInputStream)in).getFileLength();
+    public DFSDataInputStream(DFSInputStream in) throws IOException {
+      super(in);
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 007e9ff2f57..e30bd537675 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -222,7 +222,7 @@ public class DFSInputStream extends FSInputStream {
   /**
    * Return collection of blocks that has already been located.
    */
-  synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
     return getBlockRange(0, getFileLength());
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 1c10044b044..305c081835f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -205,8 +206,9 @@ public class DistributedFileSystem extends FileSystem {
     return dfs.recoverLease(getPathName(f));
   }
 
+  @SuppressWarnings("deprecation")
   @Override
-  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+  public HdfsDataInputStream open(Path f, int bufferSize) throws IOException {
     statistics.incrementReadOps(1);
     return new DFSClient.DFSDataInputStream(
         dfs.open(getPathName(f), bufferSize, verifyChecksum));
@@ -678,6 +680,7 @@ public class DistributedFileSystem extends FileSystem {
   // We do not see a need for user to report block checksum errors and do not
   // want to rely on user to report block corruptions.
   @Deprecated
+  @SuppressWarnings("deprecation")
   public boolean reportChecksumFailure(Path f,
     FSDataInputStream in, long inPos,
     FSDataInputStream sums, long sumsPos) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
new file mode 100644
index 00000000000..e07964a5a5c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.hdfs.DFSInputStream;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+
+/**
+ * The Hdfs implementation of {@link FSDataInputStream}.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
+public class HdfsDataInputStream extends FSDataInputStream {
+  public HdfsDataInputStream(DFSInputStream in) throws IOException {
+    super(in);
+  }
+
+  /**
+   * Get the datanode from which the stream is currently reading.
+   */
+  public DatanodeInfo getCurrentDatanode() {
+    return ((DFSInputStream) in).getCurrentDatanode();
+  }
+
+  /**
+   * Get the block containing the target position.
+   */
+  public ExtendedBlock getCurrentBlock() {
+    return ((DFSInputStream) in).getCurrentBlock();
+  }
+
+  /**
+   * Get the collection of blocks that have already been located.
+   */
+  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+    return ((DFSInputStream) in).getAllBlocks();
+  }
+
+  /**
+   * Get the visible length of the file. It will include the length of the
+   * last block even if that block is still under construction.
+   *
+   * @return The visible length of the file.
+   */
+  public long getVisibleLength() throws IOException {
+    return ((DFSInputStream) in).getFileLength();
+  }
+}
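
For reviewers, a minimal sketch of how client code might use the new public API once this patch is applied. This is illustrative only, not part of the patch: the class name HdfsDataInputStreamExample and the path /tmp/example.txt are invented, the cast assumes the default FileSystem is backed by HDFS (a DistributedFileSystem), and error handling is elided.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

public class HdfsDataInputStreamExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // FileSystem#open still declares FSDataInputStream, so callers holding
    // the abstract FileSystem cast to reach the HDFS-specific accessors;
    // the cast only succeeds when fs is a DistributedFileSystem.
    Path path = new Path("/tmp/example.txt");  // illustrative path
    HdfsDataInputStream in = (HdfsDataInputStream) fs.open(path);
    try {
      byte[] buf = new byte[4096];
      int n = in.read(buf);
      System.out.println("read " + n + " bytes from datanode "
          + in.getCurrentDatanode());

      // Visible length includes the last block even while it is still
      // under construction.
      System.out.println("visible length: " + in.getVisibleLength());

      for (LocatedBlock blk : in.getAllBlocks()) {
        System.out.println("located block: " + blk.getBlock());
      }
    } finally {
      in.close();
    }
  }
}

Existing callers that cast open()'s result to the old DFSClient.DFSDataInputStream keep compiling, since that class now extends HdfsDataInputStream; they only pick up a deprecation warning nudging them toward the public class.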