HDFS-9083. Replication violates block placement policy. Contributed by Rushabh Shah.

Kihwal Lee 2015-10-28 14:49:41 -05:00
parent 336be63dad
commit 13b256ed22
3 changed files with 26 additions and 10 deletions

View File

@@ -62,6 +62,8 @@ Release 2.7.2 - UNRELEASED
    HDFS-9043. Doc updation for commands in HDFS Federation
    (J.Andreina via vinayakumab)
    HDFS-9083. Replication violates block placement policy (Rushabh Shah)
    HDFS-9106. Transfer failure during pipeline recovery causes permanent
    write failures (kihwal)

View File

@@ -224,9 +224,6 @@ public class BlockManager {
  final float blocksInvalidateWorkPct;
  final int blocksReplWorkMultiplier;
  /** variable to enable check for enough racks */
  final boolean shouldCheckForEnoughRacks;
  // whether or not to issue block encryption keys.
  final boolean encryptDataTransfer;
@@ -325,9 +322,6 @@ public class BlockManager {
        conf.getInt(
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
            DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
    this.shouldCheckForEnoughRacks =
        conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
            ? false : true;
    this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
    this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);
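Context on why the removed initialization was a problem: it made the rack check contingent on net.topology.script.file.name, but a cluster can be rack-aware without a topology script, for example through a table-based mapping. A hedged configuration sketch, assuming the standard Hadoop property names and a placeholder table path:

import org.apache.hadoop.conf.Configuration;

public class TableMappingConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Rack awareness supplied by a static table instead of a script:
    // net.topology.script.file.name stays unset, yet the cluster is multi-rack.
    conf.set("net.topology.node.switch.mapping.impl",
        "org.apache.hadoop.net.TableMapping");
    conf.set("net.topology.table.file.name", "/etc/hadoop/conf/topology.table");
    System.out.println(conf.get("net.topology.script.file.name")); // prints null
  }
}

With such a setup the script key is null, so the old shouldCheckForEnoughRacks flag would have disabled the rack check even though the cluster spans multiple racks.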
@@ -351,7 +345,6 @@ public class BlockManager {
    LOG.info("maxReplication = " + maxReplication);
    LOG.info("minReplication = " + minReplication);
    LOG.info("maxReplicationStreams = " + maxReplicationStreams);
    LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
    LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
    LOG.info("encryptDataTransfer = " + encryptDataTransfer);
    LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog);
@@ -3490,9 +3483,6 @@ public class BlockManager {
  }
  boolean blockHasEnoughRacks(Block b) {
    if (!this.shouldCheckForEnoughRacks) {
      return true;
    }
    boolean enoughRacks = false;;
    Collection<DatanodeDescriptor> corruptNodes =
        corruptReplicas.getNodes(b);
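With the short-circuit gone, blockHasEnoughRacks always inspects the racks of the block's non-corrupt replicas. Below is a minimal, self-contained sketch of that rack-counting idea under simplified assumptions; the Replica class and hasEnoughRacks helper are invented for illustration and are not the BlockManager implementation:

import java.util.Arrays;
import java.util.Collection;
import java.util.HashSet;
import java.util.Set;

/** Simplified stand-in for a block replica located on a datanode in some rack. */
class Replica {
  final String node;
  final String rack;
  final boolean corrupt;

  Replica(String node, String rack, boolean corrupt) {
    this.node = node;
    this.rack = rack;
    this.corrupt = corrupt;
  }
}

class RackCheckSketch {
  /**
   * Returns true when the non-corrupt replicas span at least the expected
   * number of racks: 2 when the cluster has more than one rack, otherwise 1.
   */
  static boolean hasEnoughRacks(Collection<Replica> replicas, int totalRacksInCluster) {
    int expectedRacks = totalRacksInCluster > 1 ? 2 : 1;
    Set<String> racks = new HashSet<>();
    for (Replica r : replicas) {
      if (!r.corrupt) {
        racks.add(r.rack);
      }
    }
    return racks.size() >= expectedRacks;
  }

  public static void main(String[] args) {
    Collection<Replica> sameRack = Arrays.asList(
        new Replica("dn1", "/rackA", false),
        new Replica("dn2", "/rackA", false),
        new Replica("dn3", "/rackA", false));
    // All replicas on one rack in a two-rack cluster: not enough racks.
    System.out.println(hasEnoughRacks(sameRack, 2)); // prints false
  }
}

The expected count of two racks mirrors the default placement policy's goal of spreading replicas across at least two racks whenever the cluster itself is multi-rack.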

View File

@@ -816,4 +816,28 @@ public class TestBlockManager {
    Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
        moreThan1Racks, excessTypes));
  }
  /**
   * {@link BlockManager#blockHasEnoughRacks(BlockInfo)} should return false
   * if all the replicas are on the same rack and shouldn't be dependent on
   * CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY
   * @throws Exception
   */
  @Test
  public void testAllReplicasOnSameRack() throws Exception {
    Configuration conf = new HdfsConfiguration();
    conf.unset(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
    fsn = Mockito.mock(FSNamesystem.class);
    Mockito.doReturn(true).when(fsn).hasWriteLock();
    Mockito.doReturn(true).when(fsn).hasReadLock();
    bm = new BlockManager(fsn, conf);
    // Add nodes on two racks
    addNodes(nodes);
    // Add a new block to blocksMap with all of its replicas on the same rack
    BlockInfoContiguous blockInfo = addBlockOnNodes(1, rackA);
    // Since the network topology is multi-rack, blockHasEnoughRacks
    // should return false.
    assertFalse("Replicas for block are not stored on enough racks",
        bm.blockHasEnoughRacks(blockInfo));
  }
}
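The new test relies on fixtures already present in TestBlockManager (the nodes list spanning two racks and its rackA subset). For readers recreating the scenario outside that class, a comparable two-rack fixture could look roughly like the sketch below; the IP addresses and class layout are assumptions, with DFSTestUtil.getDatanodeDescriptor taken from the existing HDFS test utilities:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;

/**
 * Hypothetical fixture mirroring what the test assumes: a multi-rack
 * topology in which every datanode holding the block sits on /rackA,
 * while at least one other datanode sits on /rackB.
 */
class TwoRackFixture {
  static List<DatanodeDescriptor> rackA() {
    return Arrays.asList(
        DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
        DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
        DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"));
  }

  static List<DatanodeDescriptor> allNodes() {
    List<DatanodeDescriptor> all = new ArrayList<>(rackA());
    // A single node on a second rack is enough to make the topology multi-rack.
    all.add(DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"));
    return all;
  }
}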