diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 436594ee3a0..101a5dd8d2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -429,6 +429,10 @@ Release 2.6.0 - UNRELEASED
     HDFS-6810. StorageReport array is initialized with wrong size in
     DatanodeDescriptor#getStorageReports. (szetszwo via Arpit Agarwal)
+
+    HDFS-5723. Append failed FINALIZED replica should not be accepted as valid
+    when that block is under construction (vinayakumarb)
+
 Release 2.5.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 41118183658..190ca892a06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2167,6 +2167,16 @@ public class BlockManager {
       } else {
         return null; // not corrupt
       }
+    case UNDER_CONSTRUCTION:
+      if (storedBlock.getGenerationStamp() > reported.getGenerationStamp()) {
+        final long reportedGS = reported.getGenerationStamp();
+        return new BlockToMarkCorrupt(storedBlock, reportedGS, "block is "
+            + ucState + " and reported state " + reportedState
+            + ", but reported genstamp " + reportedGS
+            + " does not match genstamp in block map "
+            + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
+      }
+      return null;
     default:
       return null;
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 4ea534acdd6..d8400778ec9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -32,6 +33,7 @@ import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.HardLink;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -39,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
 import org.junit.Assert;
 import org.junit.Test;
@@ -169,6 +172,7 @@ public class TestFileAppend{
       }
 
     } finally {
+      client.close();
       fs.close();
       cluster.shutdown();
     }
@@ -380,4 +384,57 @@ public class TestFileAppend{
     }
   }
 
+  /**
+   * An old replica of the block should not be accepted as valid for append or read.
+   */
+  @Test
+  public void testFailedAppendBlockRejection() throws Exception {
+    Configuration conf = new HdfsConfiguration();
conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", + "false"); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3) + .build(); + DistributedFileSystem fs = null; + try { + fs = cluster.getFileSystem(); + Path path = new Path("/test"); + FSDataOutputStream out = fs.create(path); + out.writeBytes("hello\n"); + out.close(); + + // stop one datanode + DataNodeProperties dnProp = cluster.stopDataNode(0); + String dnAddress = dnProp.datanode.getXferAddress().toString(); + if (dnAddress.startsWith("/")) { + dnAddress = dnAddress.substring(1); + } + + // append again to bump genstamps + for (int i = 0; i < 2; i++) { + out = fs.append(path); + out.writeBytes("helloagain\n"); + out.close(); + } + + // re-open and make the block state as underconstruction + out = fs.append(path); + cluster.restartDataNode(dnProp, true); + // wait till the block report comes + Thread.sleep(2000); + // check the block locations, this should not contain restarted datanode + BlockLocation[] locations = fs.getFileBlockLocations(path, 0, + Long.MAX_VALUE); + String[] names = locations[0].getNames(); + for (String node : names) { + if (node.equals(dnAddress)) { + fail("Failed append should not be present in latest block locations."); + } + } + out.close(); + } finally { + IOUtils.closeStream(fs); + cluster.shutdown(); + } + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 77f7090e8f1..daa3aaa4c3e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -50,6 +50,7 @@ import org.apache.hadoop.fs.XAttrSetFlag; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSOutputStream; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.HdfsConfiguration; @@ -725,7 +726,12 @@ public class TestRetryCacheWithHA { client.getNamenode().updatePipeline(client.getClientName(), oldBlock, newBlock, newNodes, storageIDs); - out.close(); + // close can fail if the out.close() commit the block after block received + // notifications from Datanode. + // Since datanodes and output stream have still old genstamps, these + // blocks will be marked as corrupt after HDFS-5723 if RECEIVED + // notifications reaches namenode first and close() will fail. + DFSTestUtil.abortStream((DFSOutputStream) out.getWrappedStream()); } @Override