diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 767d36e92ed..4d3a45a2a15 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -36,6 +36,7 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeReference;
@@ -322,6 +323,12 @@ class BlockSender implements java.io.Closeable {
       } else {
         LOG.warn("Could not find metadata file for " + block);
       }
+    } catch (FileNotFoundException e) {
+      // The replica is in the volume map but not on disk
+      datanode.notifyNamenodeDeletedBlock(block, replica.getStorageUuid());
+      datanode.data.invalidate(block.getBlockPoolId(),
+          new Block[]{block.getLocalBlock()});
+      throw e;
     } finally {
       if (!keepMetaInOpen) {
         IOUtils.closeStream(metaIn);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 47f8f72c545..0b2c7b62bf7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -752,7 +752,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   File getBlockFile(String bpid, long blockId) throws IOException {
     File f = validateBlockFile(bpid, blockId);
     if(f == null) {
-      throw new IOException("BlockId " + blockId + " is not valid.");
+      throw new FileNotFoundException("BlockId " + blockId + " is not valid.");
     }
     return f;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
index 1e6db21d8af..81382985964 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeReport.java
@@ -29,11 +29,16 @@ import java.util.List;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.junit.Assert;
 import org.junit.Test;
 
 /**
@@ -89,7 +94,37 @@ public class TestDatanodeReport {
       cluster.shutdown();
     }
   }
-  
+
+  @Test
+  public void testDatanodeReportMissingBlock() throws Exception {
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    conf.setLong(HdfsClientConfigKeys.Retry.WINDOW_BASE_KEY, 1);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+        .numDataNodes(NUM_OF_DATANODES).build();
+    try {
+      // wait until the cluster is up
+      cluster.waitActive();
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path p = new Path("/testDatanodeReportMissingBlock");
+      DFSTestUtil.writeFile(fs, p, "testdata");
+      LocatedBlock lb = fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
+      assertEquals(3, lb.getLocations().length);
+      ExtendedBlock b = lb.getBlock();
+      cluster.corruptBlockOnDataNodesByDeletingBlockFile(b);
+      try {
+        DFSTestUtil.readFile(fs, p);
+        Assert.fail("Read should fail: the block file does not exist on disk");
+      } catch (IOException e) {
+        // expected: all located DataNodes are bad
+      }
+      cluster.triggerHeartbeats(); // IBR delete ack
+      lb = fs.getClient().getLocatedBlocks(p.toString(), 0).get(0);
+      assertEquals(0, lb.getLocations().length);
+    } finally {
+      cluster.shutdown();
+    }
+  }
+
   final static Comparator<StorageReport> CMP = new Comparator<StorageReport>() {
     @Override
     public int compare(StorageReport left, StorageReport right) {
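
Note for reviewers: the FsDatasetImpl change narrows the thrown type rather than widening it. FileNotFoundException extends IOException, so existing catch (IOException e) blocks keep working, while callers such as BlockSender can now react to the missing-file case specifically. The sketch below is not part of the patch; it is a minimal, self-contained illustration of that pattern, where fetchBlockFile is a hypothetical stand-in for FsDatasetImpl#getBlockFile.

import java.io.FileNotFoundException;
import java.io.IOException;

// Hypothetical illustration of why throwing the more specific
// FileNotFoundException is backward compatible: the narrow catch
// handles the missing-replica case, the broad catch still sees
// every other I/O failure.
public class MissingBlockFileExample {
  // Stand-in for FsDatasetImpl#getBlockFile after this patch.
  static void fetchBlockFile(long blockId) throws IOException {
    throw new FileNotFoundException("BlockId " + blockId + " is not valid.");
  }

  public static void main(String[] args) {
    try {
      fetchBlockFile(1073741825L);
    } catch (FileNotFoundException e) {
      // Missing block file: safe to notify the NameNode and invalidate
      // the replica, as BlockSender now does.
      System.out.println("missing replica: " + e.getMessage());
    } catch (IOException e) {
      // Any other I/O error: must NOT be treated as a deleted replica.
      System.out.println("other I/O failure: " + e.getMessage());
    }
  }
}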