From 08a39a9e733f23e3e5dbc33611dbed20ca5b58bd Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Wed, 23 May 2012 18:01:11 +0000
Subject: [PATCH] svn merge -c 1341961 from trunk for HDFS-3436. In
 DataNode.transferReplicaForPipelineRecovery(..), it should use the stored
 generation stamp to check if the block is valid.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1341962 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt        | 10 +++++--
 .../hadoop/hdfs/server/datanode/DataNode.java      | 23 +++++++++-------
 .../hadoop/hdfs/TestFileAppendRestart.java         | 28 +++++++++++++++++++
 3 files changed, 48 insertions(+), 13 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c02bea62c45..58012cb9059 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -97,6 +97,13 @@ Release 2.0.1-alpha - UNRELEASED
     HDFS-2717. BookKeeper Journal output stream doesn't check addComplete rc.
     (Ivan Kelly via umamahesh)
 
+    HDFS-3415. Make sure all layout versions are the same for all storage
+    directories in the Namenode. (Brandon Li via szetszwo)
+
+    HDFS-3436. In DataNode.transferReplicaForPipelineRecovery(..), it should
+    use the stored generation stamp to check if the block is valid. (Vinay
+    via szetszwo)
+
 Release 2.0.0-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -619,9 +626,6 @@ Release 2.0.0-alpha - UNRELEASED
     HDFS-860. fuse-dfs truncate behavior causes issues with scp.
     (Brian Bockelman via eli)
 
-    HDFS-3415. Make sure all layout versions are the same for all storage
-    directories in the Namenode. (Brandon Li via szetszwo)
-
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 486f735d0da..94da81c4210 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -2032,6 +2032,18 @@ public class DataNode extends Configured
 
     //get replica information
     synchronized(data) {
+      Block storedBlock = data.getStoredBlock(b.getBlockPoolId(),
+          b.getBlockId());
+      if (null == storedBlock) {
+        throw new IOException(b + " not found in datanode.");
+      }
+      storedGS = storedBlock.getGenerationStamp();
+      if (storedGS < b.getGenerationStamp()) {
+        throw new IOException(storedGS
+            + " = storedGS < b.getGenerationStamp(), b=" + b);
+      }
+      // Update the genstamp with storedGS
+      b.setGenerationStamp(storedGS);
       if (data.isValidRbw(b)) {
         stage = BlockConstructionStage.TRANSFER_RBW;
       } else if (data.isValidBlock(b)) {
@@ -2040,18 +2052,9 @@ public class DataNode extends Configured
         final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
         throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);
       }
-
-      storedGS = data.getStoredBlock(b.getBlockPoolId(),
-          b.getBlockId()).getGenerationStamp();
-      if (storedGS < b.getGenerationStamp()) {
-        throw new IOException(
-            storedGS + " = storedGS < b.getGenerationStamp(), b=" + b);
-      }
       visible = data.getReplicaVisibleLength(b);
     }
-
-    //set storedGS and visible length
-    b.setGenerationStamp(storedGS);
+    //set visible length
     b.setNumBytes(visible);
 
     if (targets.length > 0) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
index e10eab8c57d..6b18965687b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java
@@ -176,4 +176,32 @@ public class TestFileAppendRestart {
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test appending to a file when one of the datanodes in the existing pipeline is down.
+   * @throws Exception
+   */
+  @Test
+  public void testAppendWithPipelineRecovery() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).manageDataDfsDirs(true)
+          .manageNameDfsDirs(true).numDataNodes(4)
+          .racks(new String[] { "/rack1", "/rack1", "/rack1", "/rack2" })
+          .build();
+      cluster.waitActive();
+
+      DistributedFileSystem fs = cluster.getFileSystem();
+      Path path = new Path("/test1");
+      DFSTestUtil.createFile(fs, path, 1024, (short) 3, 1l);
+
+      cluster.stopDataNode(3);
+      DFSTestUtil.appendFile(fs, path, "hello");
+    } finally {
+      if (null != cluster) {
+        cluster.shutdown();
+      }
+    }
+  }
 }
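Reviewer note (not part of the patch): the substance of the change is that
transferReplicaForPipelineRecovery(..) now looks up the stored replica and
reconciles its generation stamp with the client-supplied block before the
RBW/Finalized validity checks run, so a replica whose genstamp was already
bumped by an earlier pipeline recovery is still recognized. Under the old
ordering, isValidRbw/isValidBlock were consulted with the stale client
genstamp first, so the lookup could fail even though a usable replica
existed. The sketch below illustrates that ordering in isolation; the class
GenStampCheckSketch, StoredReplica and reconcileGenerationStamp are
hypothetical stand-ins for the FsDatasetSpi state, not HDFS APIs.

    import java.io.IOException;

    /** Illustrative, self-contained sketch of the genstamp-first check order. */
    class GenStampCheckSketch {

      /** Hypothetical stand-in for the replica metadata stored on the datanode. */
      static final class StoredReplica {
        final long blockId;
        final long generationStamp;

        StoredReplica(long blockId, long generationStamp) {
          this.blockId = blockId;
          this.generationStamp = generationStamp;
        }
      }

      /**
       * Mirrors the patched ordering: fail if the replica is missing or older
       * than the generation stamp the client asked for, then adopt the stored
       * genstamp (which may be newer after a previous recovery) before any
       * further validity checks.
       */
      static long reconcileGenerationStamp(StoredReplica stored, long requestedGS)
          throws IOException {
        if (stored == null) {
          throw new IOException("block not found in datanode.");
        }
        if (stored.generationStamp < requestedGS) {
          throw new IOException(stored.generationStamp
              + " = storedGS < requestedGS = " + requestedGS);
        }
        // Return the stored (possibly newer) genstamp; only then would the
        // caller run the isValidRbw/isValidBlock checks against the updated
        // block identity.
        return stored.generationStamp;
      }

      public static void main(String[] args) throws IOException {
        // The client still holds genstamp 1002, but a prior pipeline recovery
        // already bumped the on-disk replica to 1005.
        StoredReplica stored = new StoredReplica(1073741825L, 1005L);
        long effectiveGS = reconcileGenerationStamp(stored, 1002L);
        System.out.println("transfer of block " + stored.blockId
            + " would use genstamp " + effectiveGS); // prints 1005
      }
    }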