HDFS-4067. TestUnderReplicatedBlocks intermittently fails due to ReplicaAlreadyExistsException. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1402261 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 2012-10-25 18:32:41 +00:00
parent 611684e2e5
commit 163577f905
2 changed files with 10 additions and 0 deletions

CHANGES.txt

@@ -235,6 +235,9 @@ Trunk (Unreleased)
     HDFS-2434. TestNameNodeMetrics.testCorruptBlock fails intermittently.
     (Jing Zhao via suresh)
 
+    HDFS-4067. TestUnderReplicatedBlocks intermittently fails due to
+    ReplicaAlreadyExistsException. (Jing Zhao via suresh)
+
     BREAKDOWN OF HDFS-3077 SUBTASKS
 
     HDFS-3077. Quorum-based protocol for reading and writing edit logs.

TestUnderReplicatedBlocks.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.Test;
 
 public class TestUnderReplicatedBlocks {
@@ -49,6 +50,12 @@ public class TestUnderReplicatedBlocks {
       ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
       DatanodeDescriptor dn = bm.blocksMap.nodeIterator(b.getLocalBlock()).next();
       bm.addToInvalidates(b.getLocalBlock(), dn);
+      // Compute the invalidate work in NN, and trigger the heartbeat from DN
+      BlockManagerTestUtil.computeAllPendingWork(bm);
+      DataNodeTestUtils.triggerHeartbeat(cluster.getDataNode(dn.getIpcPort()));
+      // Wait to make sure the DataNode receives the deletion request
+      Thread.sleep(1000);
+      // Remove the record from blocksMap
       bm.blocksMap.removeNode(b.getLocalBlock(), dn);
 
       // increment this file's replication factor
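For readers without the full test in front of them, here is a hedged sketch of how the added steps sit inside the overall test flow. The surrounding setup (MiniDFSCluster construction, file creation, the closing setReplication call) and names such as REPLICATION_FACTOR, FILE_PATH, and the test method name are reconstructions from the diff context, not the verbatim Hadoop source; only the lines marked as the fix come from this commit.

// Hedged sketch, assuming the real test's setup; lives in the blockmanagement
// package so the package-private bm.blocksMap field is accessible.
package org.apache.hadoop.hdfs.server.blockmanagement;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;

public class TestUnderReplicatedBlocksSketch {
  private static final short REPLICATION_FACTOR = 2;           // assumed value
  private static final Path FILE_PATH = new Path("/testFile"); // assumed name

  @Test
  public void testSetrepAfterInvalidatingReplica() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster =
        new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR + 1).build();
    try {
      // Create a small file and wait until it reaches its target replication.
      FileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, FILE_PATH, 1L, REPLICATION_FACTOR, 1L);
      DFSTestUtil.waitReplication(fs, FILE_PATH, REPLICATION_FACTOR);

      BlockManager bm = cluster.getNamesystem().getBlockManager();
      ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);

      // Pick the DataNode holding one replica and schedule that replica for deletion.
      DatanodeDescriptor dn = bm.blocksMap.nodeIterator(b.getLocalBlock()).next();
      bm.addToInvalidates(b.getLocalBlock(), dn);

      // The fix from this commit: push the invalidation down to the DataNode
      // before the NameNode forgets the replica, so later re-replication cannot
      // be scheduled onto a node that still has the block on disk (the case that
      // intermittently raised ReplicaAlreadyExistsException).
      BlockManagerTestUtil.computeAllPendingWork(bm);
      DataNodeTestUtils.triggerHeartbeat(cluster.getDataNode(dn.getIpcPort()));
      Thread.sleep(1000); // give the DataNode time to act on the deletion request
      bm.blocksMap.removeNode(b.getLocalBlock(), dn);

      // Raise the replication factor; the NameNode must now re-replicate the block.
      fs.setReplication(FILE_PATH, (short) (REPLICATION_FACTOR + 1));
      DFSTestUtil.waitReplication(fs, FILE_PATH, (short) (REPLICATION_FACTOR + 1));
    } finally {
      cluster.shutdown();
    }
  }
}

The ordering is the point of the patch: the pending invalidation is computed and delivered to the DataNode (heartbeat plus a short wait) before bm.blocksMap.removeNode(...), so by the time the replication factor is raised the chosen DataNode no longer holds a stale copy of the block.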