HDFS-8586. Dead Datanode is allocated for write when client is from deadnode (Contributed by Brahma Reddy Battula)

Vinayakumar B 2015-06-29 15:25:03 +05:30
parent a95d39f9d0
commit 88ceb382ef
3 changed files with 47 additions and 1 deletion
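Summary: if the writing client runs on a machine whose DataNode has been declared dead and pruned from the network topology, BlockPlacementPolicyDefault's prefer-local-node fast path could still hand that machine back as a write target. The fix guards the fast path with clusterMap.contains(localMachine); a new test in TestDeadDatanode verifies that a dead local node is never chosen as a block target.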

CHANGES.txt

@@ -968,6 +968,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8546. Prune cached replicas from DatanodeDescriptor state on replica
     invalidation. (wang)

+    HDFS-8586. Dead Datanode is allocated for write when client is from deadnode
+    (Brahma Reddy Battula via vinayakumarb)
+
 Release 2.7.2 - UNRELEASED

   INCOMPATIBLE CHANGES

BlockPlacementPolicyDefault.java

@@ -454,7 +454,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
       return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
           maxNodesPerRack, results, avoidStaleNodes, storageTypes);
     }
-    if (preferLocalNode && localMachine instanceof DatanodeDescriptor) {
+    if (preferLocalNode && localMachine instanceof DatanodeDescriptor
+        && clusterMap.contains(localMachine)) {
       DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
       // otherwise try local machine first
       if (excludedNodes.add(localMachine)) { // was not in the excluded list
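The hunk above is the whole fix: the local machine is only preferred if the topology still contains it. Below is a minimal, self-contained sketch of that guard with hypothetical stand-in names (LocalTargetSketch, Topology, "dn1", "remote-node"); it is not the real BlockPlacementPolicyDefault or NetworkTopology code, just an illustration of why the contains() check matters.

// Sketch of the guarded local-node fast path, with hypothetical types.
import java.util.HashSet;
import java.util.Set;

class LocalTargetSketch {
  // Stand-in for NetworkTopology: only live, registered nodes are members.
  static class Topology {
    private final Set<String> members = new HashSet<>();
    void add(String node) { members.add(node); }
    void remove(String node) { members.remove(node); }
    boolean contains(String node) { return members.contains(node); }
  }

  // Before the fix, only preferLocalNode and the node's type were checked,
  // so a node already pruned from the topology (i.e. dead) could be returned.
  static String chooseTarget(Topology clusterMap, String localMachine,
      boolean preferLocalNode) {
    if (preferLocalNode && localMachine != null
        && clusterMap.contains(localMachine)) {
      return localMachine;   // local fast path: node is still in the map
    }
    return "remote-node";    // otherwise fall through to the normal chooser
  }

  public static void main(String[] args) {
    Topology map = new Topology();
    map.add("dn1");
    map.remove("dn1");       // dn1 declared dead and pruned from the topology
    // Prints "remote-node": the dead local machine is no longer eligible.
    System.out.println(chooseTarget(map, "dn1", true));
  }
}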

TestDeadDatanode.java

@@ -18,9 +18,11 @@
 package org.apache.hadoop.hdfs.server.namenode;

 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.fail;

 import java.io.IOException;
+import java.util.HashSet;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -31,6 +33,9 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -43,6 +48,7 @@ import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
 import org.apache.hadoop.hdfs.server.protocol.StorageBlockReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
+import org.apache.hadoop.net.Node;

 import org.junit.After;
 import org.junit.Test;
@@ -126,4 +132,40 @@ public class TestDeadDatanode {
     assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
         .getAction());
   }
+
+  @Test
+  public void testDeadNodeAsBlockTarget() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500);
+    conf.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
+    cluster.waitActive();
+    String poolId = cluster.getNamesystem().getBlockPoolId();
+    // wait for datanode to be marked live
+    DataNode dn = cluster.getDataNodes().get(0);
+    DatanodeRegistration reg = DataNodeTestUtils.getDNRegistrationForBP(cluster
+        .getDataNodes().get(0), poolId);
+    // Get the updated datanode descriptor
+    BlockManager bm = cluster.getNamesystem().getBlockManager();
+    DatanodeManager dm = bm.getDatanodeManager();
+    Node clientNode = dm.getDatanode(reg);
+
+    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), true,
+        20000);
+
+    // Shutdown and wait for datanode to be marked dead
+    dn.shutdown();
+    DFSTestUtil.waitForDatanodeState(cluster, reg.getDatanodeUuid(), false,
+        20000);
+
+    // Get the updated datanode descriptor available in DNM
+    // choose the targets, but local node should not get selected as this is
+    // not part of the cluster anymore
+    DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
+        clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7);
+    for (DatanodeStorageInfo datanodeStorageInfo : results) {
+      assertFalse("Dead node should not be chosen", datanodeStorageInfo
+          .getDatanodeDescriptor().equals(clientNode));
+    }
+  }
 }
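A note on the test's design: it shortens the heartbeat interval to 1 second and the NameNode's recheck interval to 500 ms so that the shut-down DataNode is declared dead well within the 20-second waitForDatanodeState timeouts. It then requests three block targets on behalf of the dead node (acting as the client) and asserts that none of the returned storages belongs to it, which exercises exactly the clusterMap.contains(localMachine) guard added in BlockPlacementPolicyDefault.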