diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0677bf6ffd3..e755113e359 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -615,6 +615,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7218. FSNamesystem ACL operations should write to audit log on
     failure. (clamb via yliu)
 
+    HDFS-7383. DataNode.requestShortCircuitFdsForRead may throw
+    NullPointerException. (szetszwo via suresh)
+
   BREAKDOWN OF HDFS-6581 SUBTASKS AND RELATED JIRAS
 
     HDFS-6921. Add LazyPersist flag to FileStatus. (Arpit Agarwal)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 82572e0df84..a7dd9223584 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1522,7 +1522,7 @@ public class DataNode extends ReconfigurableBase
 
     try {
       fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0);
-      fis[1] = (FileInputStream)data.getMetaDataInputStream(blk).getWrappedStream();
+      fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data);
     } catch (ClassCastException e) {
       LOG.debug("requestShortCircuitFdsForRead failed", e);
       throw new ShortCircuitFdsUnsupportedException("This DataNode's " +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
index bd1ba2f0908..746c3f6948f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
@@ -18,10 +18,15 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 
 /** Provide utility methods for Datanode. */
 @InterfaceAudience.Private
@@ -114,4 +119,20 @@ public class DatanodeUtil {
         DataStorage.BLOCK_SUBDIR_PREFIX + d2;
     return new File(root, path);
   }
+
+  /**
+   * @return the FileInputStream for the meta data of the given block.
+   * @throws FileNotFoundException
+   *           if the file is not found.
+   * @throws ClassCastException
+   *           if the underlying input stream is not a FileInputStream.
+   */
+  public static FileInputStream getMetaDataInputStream(
+      ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
+    final LengthInputStream lin = data.getMetaDataInputStream(b);
+    if (lin == null) {
+      throw new FileNotFoundException("Meta file for " + b + " not found.");
+    }
+    return (FileInputStream)lin.getWrappedStream();
+  }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
index 4acfc8f01a8..c6408e6d5ec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -373,8 +374,7 @@ public class FsDatasetCache {
       reservedBytes = true;
       try {
         blockIn = (FileInputStream)dataset.getBlockInputStream(extBlk, 0);
-        metaIn = (FileInputStream)dataset.getMetaDataInputStream(extBlk)
-            .getWrappedStream();
+        metaIn = DatanodeUtil.getMetaDataInputStream(extBlk, dataset);
       } catch (ClassCastException e) {
         LOG.warn("Failed to cache " + key +
             ": Underlying blocks are not backed by files.", e);
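
Note (not part of the patch): the NullPointerException arose because data.getMetaDataInputStream(blk) can return null when a block's meta file is missing, and both call sites immediately invoked .getWrappedStream() on the result. The new DatanodeUtil.getMetaDataInputStream helper converts that null into a descriptive FileNotFoundException. A minimal standalone sketch of this null-handling pattern follows; MetaStore and the string block id are hypothetical stand-ins for FsDatasetSpi and ExtendedBlock, which need a running DataNode to construct.

// Standalone sketch (hypothetical types) of the null-check pattern the
// patch introduces in DatanodeUtil.getMetaDataInputStream.
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;

public class MetaStreamSketch {
  /** Hypothetical stand-in for FsDatasetSpi.getMetaDataInputStream. */
  interface MetaStore {
    FileInputStream getMetaDataInputStream(String blockId) throws IOException;
  }

  /**
   * Mirrors the patched behavior: a missing meta file surfaces as a
   * descriptive FileNotFoundException rather than a NullPointerException.
   */
  static FileInputStream getMetaDataInputStream(String blockId, MetaStore store)
      throws IOException {
    final FileInputStream in = store.getMetaDataInputStream(blockId);
    if (in == null) {
      throw new FileNotFoundException("Meta file for " + blockId + " not found.");
    }
    return in;
  }

  public static void main(String[] args) throws IOException {
    final MetaStore emptyStore = blockId -> null; // simulates a missing meta file
    try {
      getMetaDataInputStream("blk_1073741825", emptyStore);
    } catch (FileNotFoundException e) {
      // Callers such as requestShortCircuitFdsForRead can now report the
      // missing file instead of crashing with an NPE.
      System.out.println("caught: " + e.getMessage());
    }
  }
}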