HDFS-9083. Replication violates block placement policy (Rushabh Shah)

parent a9001a210a
commit b9a6f9aa16

@@ -18,6 +18,8 @@ Release 2.6.3 - UNRELEASED
     HDFS-9431. DistributedFileSystem#concat fails if the target path is
     relative. (Kazuho Fujii via aajisaka)
 
+    HDFS-9083. Replication violates block placement policy (Rushabh Shah)
+
 Release 2.6.2 - 2015-10-28
 
   INCOMPATIBLE CHANGES

@@ -225,9 +225,6 @@ public class BlockManager {
   final float blocksInvalidateWorkPct;
   final int blocksReplWorkMultiplier;
 
-  /** variable to enable check for enough racks */
-  final boolean shouldCheckForEnoughRacks;
-
   // whether or not to issue block encryption keys.
   final boolean encryptDataTransfer;

@@ -325,9 +322,6 @@ public class BlockManager {
         conf.getInt(
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_KEY,
             DFSConfigKeys.DFS_NAMENODE_REPLICATION_STREAMS_HARD_LIMIT_DEFAULT);
-    this.shouldCheckForEnoughRacks =
-        conf.get(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY) == null
-            ? false : true;
 
     this.blocksInvalidateWorkPct = DFSUtil.getInvalidateWorkPctPerIteration(conf);
     this.blocksReplWorkMultiplier = DFSUtil.getReplWorkMultiplier(conf);

@@ -351,7 +345,6 @@ public class BlockManager {
     LOG.info("maxReplication = " + maxReplication);
     LOG.info("minReplication = " + minReplication);
     LOG.info("maxReplicationStreams = " + maxReplicationStreams);
-    LOG.info("shouldCheckForEnoughRacks = " + shouldCheckForEnoughRacks);
     LOG.info("replicationRecheckInterval = " + replicationRecheckInterval);
     LOG.info("encryptDataTransfer = " + encryptDataTransfer);
     LOG.info("maxNumBlocksToLog = " + maxNumBlocksToLog);

@@ -3548,9 +3541,6 @@ public class BlockManager {
   }
 
   boolean blockHasEnoughRacks(Block b) {
-    if (!this.shouldCheckForEnoughRacks) {
-      return true;
-    }
     boolean enoughRacks = false;;
     Collection<DatanodeDescriptor> corruptNodes =
         corruptReplicas.getNodes(b);

@@ -827,4 +827,28 @@ public class TestBlockManager {
     Assert.assertFalse(BlockManager.useDelHint(true, delHint, null,
         moreThan1Racks, excessTypes));
   }
+
+  /**
+   * {@link BlockManager#blockHasEnoughRacks(BlockInfo)} should return false
+   * if all the replicas are on the same rack and shouldn't be dependent on
+   * CommonConfigurationKeysPublic.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY
+   * @throws Exception
+   */
+  @Test
+  public void testAllReplicasOnSameRack() throws Exception {
+    Configuration conf = new HdfsConfiguration();
+    conf.unset(DFSConfigKeys.NET_TOPOLOGY_SCRIPT_FILE_NAME_KEY);
+    fsn = Mockito.mock(FSNamesystem.class);
+    Mockito.doReturn(true).when(fsn).hasWriteLock();
+    Mockito.doReturn(true).when(fsn).hasReadLock();
+    bm = new BlockManager(fsn, fsn, conf);
+    // Add nodes on two racks
+    addNodes(nodes);
+    // Add a new block to the blocksMap with all of its replicas on one rack
+    BlockInfo blockInfo = addBlockOnNodes(1, rackA);
+    // Since the network topology is multi-rack, blockHasEnoughRacks
+    // should return false.
+    assertFalse("Replicas for block are not stored on enough racks",
+        bm.blockHasEnoughRacks(blockInfo));
+  }
 }
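
Why removing the flag fixes the bug: shouldCheckForEnoughRacks was derived solely from net.topology.script.file.name, but HDFS can also resolve racks through other DNSToSwitchMapping mechanisms (for example net.topology.node.switch.mapping.impl or net.topology.table.file.name). On such clusters the flag stayed false, blockHasEnoughRacks() returned true for every block, and the replication monitor never repaired blocks whose replicas all sat on one rack. The self-contained sketch below models only that control-flow change; the class, the map-based replica state, and the helper names are invented for illustration and are not Hadoop APIs.

import java.util.Map;
import java.util.Set;

// Sketch only: models the control-flow change in this commit, not the
// real BlockManager.
class RackCheckSketch {
  // Pre-patch: cached once at startup, true only when the topology
  // *script* key (net.topology.script.file.name) was set.
  private final boolean shouldCheckForEnoughRacks;
  // blockId -> racks currently holding a replica of that block.
  private final Map<String, Set<String>> racksByBlock;

  RackCheckSketch(boolean topologyScriptConfigured,
                  Map<String, Set<String>> racksByBlock) {
    this.shouldCheckForEnoughRacks = topologyScriptConfigured;
    this.racksByBlock = racksByBlock;
  }

  // Pre-patch behavior: with the flag false, every block is reported as
  // having enough racks, so same-rack placement is never repaired.
  boolean blockHasEnoughRacksBefore(String blockId) {
    if (!shouldCheckForEnoughRacks) {
      return true;
    }
    return onMoreThanOneRack(blockId);
  }

  // Post-patch behavior: always run the real check; a genuinely
  // single-rack cluster is handled inside the check, not by a config flag.
  boolean blockHasEnoughRacksAfter(String blockId) {
    return onMoreThanOneRack(blockId);
  }

  private boolean onMoreThanOneRack(String blockId) {
    return racksByBlock.getOrDefault(blockId, Set.of()).size() > 1;
  }

  public static void main(String[] args) {
    // Two replicas of one block, both on /rackA: placement is violated,
    // and no topology script is configured.
    RackCheckSketch s = new RackCheckSketch(
        false, Map.of("blk_1", Set.of("/rackA")));
    System.out.println(s.blockHasEnoughRacksBefore("blk_1")); // true  (bug)
    System.out.println(s.blockHasEnoughRacksAfter("blk_1"));  // false (fixed)
  }
}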