HDFS-7383. DataNode.requestShortCircuitFdsForRead may throw NullPointerException. Contributed by Tsz Wo Nicholas Sze.

Conflicts:
	hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

parent e1608c28d5
commit c98fcd1d3b
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -615,6 +615,9 @@ Release 2.6.0 - UNRELEASED
     HDFS-7218. FSNamesystem ACL operations should write to audit log on
     failure. (clamb via yliu)
 
+    HDFS-7383. DataNode.requestShortCircuitFdsForRead may throw
+    NullPointerException. (szetszwo via suresh)
+
   BREAKDOWN OF HDFS-6581 SUBTASKS AND RELATED JIRAS
 
     HDFS-6921. Add LazyPersist flag to FileStatus. (Arpit Agarwal)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1522,7 +1522,7 @@ public class DataNode extends ReconfigurableBase
 
     try {
       fis[0] = (FileInputStream)data.getBlockInputStream(blk, 0);
-      fis[1] = (FileInputStream)data.getMetaDataInputStream(blk).getWrappedStream();
+      fis[1] = DatanodeUtil.getMetaDataInputStream(blk, data);
     } catch (ClassCastException e) {
       LOG.debug("requestShortCircuitFdsForRead failed", e);
       throw new ShortCircuitFdsUnsupportedException("This DataNode's " +
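Why the old line could throw: the dataset's getMetaDataInputStream() returns null when the block's meta file does not exist, so the chained .getWrappedStream() call dereferenced null, and the surrounding catch only handles ClassCastException. Below is a minimal, self-contained sketch of that failure mode and of the null-check fix; the class and method names here (LengthStream, lookupMetaStream) are illustrative stand-ins, not Hadoop APIs.

// NOTE: illustrative sketch only; not part of the patch.
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.IOException;

public class MetaStreamNullCheckSketch {

  /** Stand-in for LengthInputStream: exposes the stream it wraps. */
  static class LengthStream {
    private final FileInputStream wrapped;
    LengthStream(FileInputStream in) { this.wrapped = in; }
    FileInputStream getWrappedStream() { return wrapped; }
  }

  /** Stand-in for the dataset lookup: returns null when the meta file
   *  is missing, mirroring the trigger for HDFS-7383. */
  static LengthStream lookupMetaStream(File metaFile) throws IOException {
    return metaFile.exists() ? new LengthStream(new FileInputStream(metaFile)) : null;
  }

  public static void main(String[] args) throws IOException {
    File missing = new File("no-such-block.meta");

    // Old pattern: unguarded dereference. A missing meta file makes
    // lookupMetaStream() return null, so .getWrappedStream() throws
    // NullPointerException -- which a catch (ClassCastException) block,
    // like the one in requestShortCircuitFdsForRead, never sees.
    try {
      FileInputStream in = lookupMetaStream(missing).getWrappedStream();
      in.close();
    } catch (NullPointerException e) {
      System.out.println("old pattern: NPE on missing meta file");
    }

    // New pattern (what the helper added by this patch does): check for
    // null and raise a descriptive FileNotFoundException instead.
    try {
      LengthStream lin = lookupMetaStream(missing);
      if (lin == null) {
        throw new FileNotFoundException("Meta file " + missing + " not found.");
      }
      lin.getWrappedStream().close();
    } catch (FileNotFoundException e) {
      System.out.println("new pattern: " + e.getMessage());
    }
  }
}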
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeUtil.java
@@ -18,10 +18,15 @@
 package org.apache.hadoop.hdfs.server.datanode;
 
 import java.io.File;
+import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
+import org.apache.hadoop.hdfs.server.datanode.fsdataset.LengthInputStream;
 
 /** Provide utility methods for Datanode. */
 @InterfaceAudience.Private
@@ -114,4 +119,20 @@ public class DatanodeUtil {
         DataStorage.BLOCK_SUBDIR_PREFIX + d2;
     return new File(root, path);
   }
+
+  /**
+   * @return the FileInputStream for the meta data of the given block.
+   * @throws FileNotFoundException
+   *           if the file not found.
+   * @throws ClassCastException
+   *           if the underlying input stream is not a FileInputStream.
+   */
+  public static FileInputStream getMetaDataInputStream(
+      ExtendedBlock b, FsDatasetSpi<?> data) throws IOException {
+    final LengthInputStream lin = data.getMetaDataInputStream(b);
+    if (lin == null) {
+      throw new FileNotFoundException("Meta file for " + b + " not found.");
+    }
+    return (FileInputStream)lin.getWrappedStream();
+  }
 }
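For completeness, here is a hedged sketch of how the new helper could be exercised in a unit test. The Mockito setup and test scaffolding below are assumptions for illustration, not part of this patch; it relies only on the signature shown in the hunk above, where the dataset's getMetaDataInputStream(ExtendedBlock) returns a LengthInputStream, or null when the meta file is absent.

// Hypothetical test sketch (not from the patch): a dataset whose
// getMetaDataInputStream() returns null should now yield a
// FileNotFoundException rather than a NullPointerException.
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.io.FileNotFoundException;
import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.junit.Assert;
import org.junit.Test;

public class TestGetMetaDataInputStreamSketch {
  @Test
  public void testMissingMetaFileThrowsFileNotFound() throws IOException {
    FsDatasetSpi<?> dataset = mock(FsDatasetSpi.class);
    ExtendedBlock blk = new ExtendedBlock("bp-test", 1L);
    // Simulate a missing meta file: the dataset lookup returns null.
    when(dataset.getMetaDataInputStream(blk)).thenReturn(null);
    try {
      DatanodeUtil.getMetaDataInputStream(blk, dataset);
      Assert.fail("expected FileNotFoundException");
    } catch (FileNotFoundException expected) {
      // The helper converts the null into a descriptive exception.
    }
  }
}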
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetCache.java
@@ -51,6 +51,7 @@ import org.apache.hadoop.hdfs.ExtendedBlockId;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DatanodeUtil;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.util.Time;
 import org.slf4j.Logger;
@@ -373,8 +374,7 @@ public class FsDatasetCache {
       reservedBytes = true;
       try {
         blockIn = (FileInputStream)dataset.getBlockInputStream(extBlk, 0);
-        metaIn = (FileInputStream)dataset.getMetaDataInputStream(extBlk)
-            .getWrappedStream();
+        metaIn = DatanodeUtil.getMetaDataInputStream(extBlk, dataset);
       } catch (ClassCastException e) {
         LOG.warn("Failed to cache " + key +
             ": Underlying blocks are not backed by files.", e);