HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be easily overridden. (Contributed by Walter Su)
This commit is contained in:
parent
4be648b55c
commit
d505c8acd3
|
@ -385,6 +385,9 @@ Release 2.8.0 - UNRELEASED
|
||||||
HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to
|
HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to
|
||||||
HdfsClientConfigKeys.Retry. (szetszwo)
|
HdfsClientConfigKeys.Retry. (szetszwo)
|
||||||
|
|
||||||
|
HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
|
||||||
|
can be easily overridden. (Walter Su via vinayakumarb)
|
||||||
|
|
||||||
OPTIMIZATIONS
|
OPTIMIZATIONS
|
||||||
|
|
||||||
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
|
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
|
||||||
|
|
|
@ -333,41 +333,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
|
||||||
+ " unavailableStorages=" + unavailableStorages
|
+ " unavailableStorages=" + unavailableStorages
|
||||||
+ ", storagePolicy=" + storagePolicy);
|
+ ", storagePolicy=" + storagePolicy);
|
||||||
}
|
}
|
||||||
|
writer = chooseTargetInOrder(numOfReplicas, writer, excludedNodes, blocksize,
|
||||||
if (numOfResults == 0) {
|
maxNodesPerRack, results, avoidStaleNodes, newBlock, storageTypes);
|
||||||
writer = chooseLocalStorage(writer, excludedNodes, blocksize,
|
|
||||||
maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
|
|
||||||
.getDatanodeDescriptor();
|
|
||||||
if (--numOfReplicas == 0) {
|
|
||||||
return writer;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
|
|
||||||
if (numOfResults <= 1) {
|
|
||||||
chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
|
|
||||||
results, avoidStaleNodes, storageTypes);
|
|
||||||
if (--numOfReplicas == 0) {
|
|
||||||
return writer;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if (numOfResults <= 2) {
|
|
||||||
final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
|
|
||||||
if (clusterMap.isOnSameRack(dn0, dn1)) {
|
|
||||||
chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
|
|
||||||
results, avoidStaleNodes, storageTypes);
|
|
||||||
} else if (newBlock){
|
|
||||||
chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
|
|
||||||
results, avoidStaleNodes, storageTypes);
|
|
||||||
} else {
|
|
||||||
chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
|
|
||||||
results, avoidStaleNodes, storageTypes);
|
|
||||||
}
|
|
||||||
if (--numOfReplicas == 0) {
|
|
||||||
return writer;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
|
|
||||||
maxNodesPerRack, results, avoidStaleNodes, storageTypes);
|
|
||||||
} catch (NotEnoughReplicasException e) {
|
} catch (NotEnoughReplicasException e) {
|
||||||
final String message = "Failed to place enough replicas, still in need of "
|
final String message = "Failed to place enough replicas, still in need of "
|
||||||
+ (totalReplicasExpected - results.size()) + " to reach "
|
+ (totalReplicasExpected - results.size()) + " to reach "
|
||||||
|
@ -423,6 +390,54 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
|
||||||
return writer;
|
return writer;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
protected Node chooseTargetInOrder(int numOfReplicas,
|
||||||
|
Node writer,
|
||||||
|
final Set<Node> excludedNodes,
|
||||||
|
final long blocksize,
|
||||||
|
final int maxNodesPerRack,
|
||||||
|
final List<DatanodeStorageInfo> results,
|
||||||
|
final boolean avoidStaleNodes,
|
||||||
|
final boolean newBlock,
|
||||||
|
EnumMap<StorageType, Integer> storageTypes)
|
||||||
|
throws NotEnoughReplicasException {
|
||||||
|
final int numOfResults = results.size();
|
||||||
|
if (numOfResults == 0) {
|
||||||
|
writer = chooseLocalStorage(writer, excludedNodes, blocksize,
|
||||||
|
maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
|
||||||
|
.getDatanodeDescriptor();
|
||||||
|
if (--numOfReplicas == 0) {
|
||||||
|
return writer;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
|
||||||
|
if (numOfResults <= 1) {
|
||||||
|
chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
|
||||||
|
results, avoidStaleNodes, storageTypes);
|
||||||
|
if (--numOfReplicas == 0) {
|
||||||
|
return writer;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if (numOfResults <= 2) {
|
||||||
|
final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
|
||||||
|
if (clusterMap.isOnSameRack(dn0, dn1)) {
|
||||||
|
chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
|
||||||
|
results, avoidStaleNodes, storageTypes);
|
||||||
|
} else if (newBlock){
|
||||||
|
chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
|
||||||
|
results, avoidStaleNodes, storageTypes);
|
||||||
|
} else {
|
||||||
|
chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
|
||||||
|
results, avoidStaleNodes, storageTypes);
|
||||||
|
}
|
||||||
|
if (--numOfReplicas == 0) {
|
||||||
|
return writer;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
|
||||||
|
maxNodesPerRack, results, avoidStaleNodes, storageTypes);
|
||||||
|
return writer;
|
||||||
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Choose <i>localMachine</i> as the target.
|
* Choose <i>localMachine</i> as the target.
|
||||||
* if <i>localMachine</i> is not available,
|
* if <i>localMachine</i> is not available,
|
||||||
|
|
Loading…
Reference in New Issue