diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e63532d0fa7..bd6e9f08c80 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -62,6 +62,8 @@ Release 2.7.2 - UNRELEASED HDFS-9043. Doc updation for commands in HDFS Federation (J.Andreina via vinayakumab) + HDFS-9083. Replication violates block placement policy (Rushabh Shah) + HDFS-9106. Transfer failure during pipeline recovery causes permanent write failures (kihwal) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java index d7703464d75..c360d4c1e33 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java @@ -224,9 +224,6 @@ public class BlockManager { final float blocksInvalidateWorkPct; final int blocksReplWorkMultiplier; - - /** variable to enable check for enough racks */ - final boolean shouldCheckForEnoughRacks; // whether or not to issue block encryption keys. final boolean encryptDataTransfer; @@ -325,9 +322,6 @@ public class BlockManager { conf.getInt( DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY, DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT); - this.shouldCheckForEnoughRacks = - conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null - ? 
false : true; this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf); this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf); @@ -351,7 +345,6 @@ public class BlockManager { LOG.info("maxReplication = " + maxReplication); LOG.info("minReplication = " + minReplication); LOG.info("maxReplicationStreams = " + maxReplicationStreams); - LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks); LOG.info("replicationRecheckInterval = " + replicationRecheckInterval); LOG.info("encryptDataTransfer = " + encryptDataTransfer); LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog); @@ -3490,9 +3483,6 @@ public class BlockManager { } boolean blockHasEnoughRacks(Block b) { - if (!this.shouldCheckForEnoughRacks) { - return true; - } boolean enoughRacks = false;; Collection corruptNodes = corruptReplicas.getNodes(b); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java index fba840ed8ed..e026a53639e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java @@ -816,4 +816,28 @@ public class TestBlockManager { Assert.assertFalse(BlockManager.useDelHint(true, delHint, null, moreThan1Racks, excessTypes)); } + + /** + * {@link BlockManager#blockHasEnoughRacks(Block)} should return false + * if all the replicas are on the same rack and shouldn't be dependent on + * CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY + * @throws Exception + */ + @Test + public void testAllReplicasOnSameRack() throws Exception { + Configuration conf = new HdfsConfiguration(); + conf.unset(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY); + fsn = 
Mockito.mock(FSNamesystem.class); + Mockito.doReturn(true).when(fsn).hasWriteLock(); + Mockito.doReturn(true).when(fsn).hasReadLock(); + bm = new BlockManager(fsn, conf); + // Add nodes on two racks + addNodes(nodes); + // Added a new block in blocksMap and all the replicas are on the same rack + BlockInfoContiguous blockInfo = addBlockOnNodes(1, rackA); + // Since the network topology is multi-rack, the blockHasEnoughRacks + // should return false. + assertFalse("Replicas for block is not stored on enough racks", + bm.blockHasEnoughRacks(blockInfo)); + } }