Merge r1327788 and r1327790 from trunk for HDFS-3282. Add HdfsDataInputStream as a public API.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1330436 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2012-04-25 17:39:13 +00:00
parent a7e688c6a8
commit b3787c3d87
5 changed files with 84 additions and 37 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -79,6 +79,8 @@ Release 2.0.0 - UNRELEASED
 
     HDFS-3004. Implement Recovery Mode. (Colin Patrick McCabe via eli)
 
+    HDFS-3282. Add HdfsDataInputStream as a public API. (umamahesh)
+
   IMPROVEMENTS
 
     HDFS-2018. Move all journal stream management code into one place.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -80,7 +80,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
-import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileSystem;
@@ -93,7 +92,7 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1849,41 +1848,13 @@ public class DFSClient implements java.io.Closeable {
   }
 
   /**
-   * The Hdfs implementation of {@link FSDataInputStream}
+   * @deprecated use {@link HdfsDataInputStream} instead.
    */
-  @InterfaceAudience.Private
-  public static class DFSDataInputStream extends FSDataInputStream {
-    public DFSDataInputStream(DFSInputStream in)
-      throws IOException {
-      super(in);
-    }
-
-    /**
-     * Returns the datanode from which the stream is currently reading.
-     */
-    public DatanodeInfo getCurrentDatanode() {
-      return ((DFSInputStream)in).getCurrentDatanode();
-    }
-
-    /**
-     * Returns the block containing the target position.
-     */
-    public ExtendedBlock getCurrentBlock() {
-      return ((DFSInputStream)in).getCurrentBlock();
-    }
-
-    /**
-     * Return collection of blocks that has already been located.
-     */
-    synchronized List<LocatedBlock> getAllBlocks() throws IOException {
-      return ((DFSInputStream)in).getAllBlocks();
-    }
-
-    /**
-     * @return The visible length of the file.
-     */
-    public long getVisibleLength() throws IOException {
-      return ((DFSInputStream)in).getFileLength();
-    }
-
+  @Deprecated
+  public static class DFSDataInputStream extends HdfsDataInputStream {
+
+    public DFSDataInputStream(DFSInputStream in) throws IOException {
+      super(in);
+    }
   }
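
Note that the inner class is kept only for compatibility: it now extends the new public type, so existing references still compile and widen to HdfsDataInputStream without a cast. A minimal sketch of that relationship (the wrapper class and method name below are hypothetical, not part of the commit):

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

class MigrationSketch {
  // Legal only after this change: DFSDataInputStream is now a subtype of
  // HdfsDataInputStream, so the deprecated type widens to the public one.
  @SuppressWarnings("deprecation")
  static HdfsDataInputStream widen(DFSClient.DFSDataInputStream legacy) {
    return legacy;
  }
}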

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -222,7 +222,7 @@ public class DFSInputStream extends FSInputStream {
 
   /**
    * Return collection of blocks that has already been located.
    */
-  synchronized List<LocatedBlock> getAllBlocks() throws IOException {
+  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
     return getBlockRange(0, getFileLength());
   }
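
The widening to public here is what lets the new HdfsDataInputStream delegate to getAllBlocks(): the wrapper lives in a different package (org.apache.hadoop.hdfs.client), where the previous package-private visibility would not have been accessible.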

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.fs.PathFilter;
 import org.apache.hadoop.fs.RemoteIterator;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
+import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -205,8 +206,9 @@ public class DistributedFileSystem extends FileSystem {
     return dfs.recoverLease(getPathName(f));
   }
 
+  @SuppressWarnings("deprecation")
   @Override
-  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+  public HdfsDataInputStream open(Path f, int bufferSize) throws IOException {
     statistics.incrementReadOps(1);
     return new DFSClient.DFSDataInputStream(
         dfs.open(getPathName(f), bufferSize, verifyChecksum));
@@ -678,6 +680,7 @@
   // We do not see a need for user to report block checksum errors and do not
   // want to rely on user to report block corruptions.
   @Deprecated
+  @SuppressWarnings("deprecation")
   public boolean reportChecksumFailure(Path f,
     FSDataInputStream in, long inPos,
     FSDataInputStream sums, long sumsPos) {
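
With the covariant return type on open(Path, int), a caller holding a DistributedFileSystem reference gets the HDFS-specific stream type with no cast; the @SuppressWarnings("deprecation") is needed because the method still constructs the now-deprecated DFSDataInputStream internally. A hedged usage sketch (the class name and file path below are illustrative, not from the commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;

class OpenSketch {
  static void show(Configuration conf) throws Exception {
    // Assumes the default file system is HDFS, so the cast below succeeds.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    HdfsDataInputStream in = dfs.open(new Path("/tmp/example.txt"), 4096);
    try {
      System.out.println("visible length: " + in.getVisibleLength());
    } finally {
      in.close();
    }
  }
}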

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsDataInputStream.java (new file)

@@ -0,0 +1,71 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hdfs.client;

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.hdfs.DFSInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

/**
 * The Hdfs implementation of {@link FSDataInputStream}.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class HdfsDataInputStream extends FSDataInputStream {
  public HdfsDataInputStream(DFSInputStream in) throws IOException {
    super(in);
  }

  /**
   * Get the datanode from which the stream is currently reading.
   */
  public DatanodeInfo getCurrentDatanode() {
    return ((DFSInputStream) in).getCurrentDatanode();
  }

  /**
   * Get the block containing the target position.
   */
  public ExtendedBlock getCurrentBlock() {
    return ((DFSInputStream) in).getCurrentBlock();
  }

  /**
   * Get the collection of blocks that has already been located.
   */
  public synchronized List<LocatedBlock> getAllBlocks() throws IOException {
    return ((DFSInputStream) in).getAllBlocks();
  }

  /**
   * Get the visible length of the file. It will include the length of the last
   * block even if that is in UnderConstruction state.
   *
   * @return The visible length of the file.
   */
  public long getVisibleLength() throws IOException {
    return ((DFSInputStream) in).getFileLength();
  }
}
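
This new class is the public face of the HDFS read path. A hedged sketch of how client code might use the accessors (the class name, path handling, and output are illustrative; the one-byte read reflects an assumption that the stream connects to a datanode lazily, so getCurrentDatanode() may be unset before any read):

import java.util.List;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;

class AccessorSketch {
  static void inspect(FileSystem fs, Path path) throws Exception {
    FSDataInputStream raw = fs.open(path);
    if (!(raw instanceof HdfsDataInputStream)) {
      raw.close();  // e.g. a local file system; nothing HDFS-specific to show
      return;
    }
    HdfsDataInputStream in = (HdfsDataInputStream) raw;
    in.read();      // trigger a datanode connection so the accessors are populated
    System.out.println("current datanode: " + in.getCurrentDatanode());
    System.out.println("current block:    " + in.getCurrentBlock());
    List<LocatedBlock> blocks = in.getAllBlocks();
    System.out.println("blocks located:   " + blocks.size());
    System.out.println("visible length:   " + in.getVisibleLength());
    in.close();
  }
}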