HDFS-11998. Enable DFSNetworkTopology as default. Contributed by Chen Liang.

Arpit Agarwal 2017-08-28 16:22:42 -07:00
parent 98ba68b05d
commit e19f004f65
4 changed files with 27 additions and 2 deletions

DFSConfigKeys.java

@@ -987,7 +987,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_USE_DFS_NETWORK_TOPOLOGY_KEY =
"dfs.use.dfs.network.topology";
-public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = false;
+public static final boolean DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT = true;
// dfs.client.retry confs are moved to HdfsClientConfigKeys.Retry
@Deprecated
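
With the default flipped to true, code that consults this key selects the DFSNetworkTopology implementation unless a site overrides it. A minimal sketch of that selection pattern, assuming the getInstance factories on NetworkTopology and DFSNetworkTopology; the class and method names below are illustrative, not the actual DatanodeManager code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
import org.apache.hadoop.net.NetworkTopology;

class TopologySelectionSketch {
  // Illustrative sketch: read the flag (now defaulting to true) and choose
  // the topology implementation accordingly.
  static NetworkTopology newTopology(Configuration conf) {
    boolean useDfsTopology = conf.getBoolean(
        DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY,
        DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_DEFAULT);
    return useDfsTopology
        ? DFSNetworkTopology.getInstance(conf)   // assumed factory method
        : NetworkTopology.getInstance(conf);     // assumed factory method
  }
}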

AvailableSpaceBlockPlacementPolicy.java

@@ -24,10 +24,13 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AVAILABLE_SPACE_
import java.util.Collection;
import java.util.Random;
import com.google.common.base.Preconditions;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -69,6 +72,19 @@ public class AvailableSpaceBlockPlacementPolicy extends
balancedPreference = (int) (100 * balancedPreferencePercent);
}
@Override
protected DatanodeDescriptor chooseDataNode(final String scope,
final Collection<Node> excludedNode, StorageType type) {
// only the code that uses DFSNetworkTopology should trigger this code path.
Preconditions.checkArgument(clusterMap instanceof DFSNetworkTopology);
DFSNetworkTopology dfsClusterMap = (DFSNetworkTopology)clusterMap;
DatanodeDescriptor a = (DatanodeDescriptor) dfsClusterMap
.chooseRandomWithStorageType(scope, excludedNode, type);
DatanodeDescriptor b = (DatanodeDescriptor) dfsClusterMap
.chooseRandomWithStorageType(scope, excludedNode, type);
return select(a, b);
}
@Override
protected DatanodeDescriptor chooseDataNode(final String scope,
final Collection<Node> excludedNode) {
@@ -76,6 +92,11 @@
(DatanodeDescriptor) clusterMap.chooseRandom(scope, excludedNode);
DatanodeDescriptor b =
(DatanodeDescriptor) clusterMap.chooseRandom(scope, excludedNode);
return select(a, b);
}
private DatanodeDescriptor select(
DatanodeDescriptor a, DatanodeDescriptor b) {
if (a != null && b != null){
int ret = compareDataNode(a, b);
if (ret == 0) {
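
Both chooseDataNode overloads draw two random candidates from the topology and hand them to select(), a "power of two choices" step biased toward the node with more available space via compareDataNode. A conceptual, self-contained sketch of that selection step; the helper below is illustrative, not the policy's actual code, and the real tie-breaking uses the configured balanced-space preference:

import java.util.Comparator;
import java.util.Random;

final class TwoChoicesSketch {
  private static final Random RAND = new Random();

  // Illustrative only: keep whichever of two randomly chosen candidates the
  // comparator ranks higher; tolerate null candidates and break ties at random.
  static <T> T pickBetter(T a, T b, Comparator<T> preferMoreFreeSpace) {
    if (a == null) {
      return b;
    }
    if (b == null) {
      return a;
    }
    int cmp = preferMoreFreeSpace.compare(a, b);
    if (cmp == 0) {
      return RAND.nextBoolean() ? a : b;
    }
    return cmp > 0 ? a : b;
  }
}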

hdfs-default.xml

@@ -4286,7 +4286,7 @@
<property>
<name>dfs.use.dfs.network.topology</name>
-<value>false</value>
+<value>true</value>
<description>
Enables DFSNetworkTopology to choose nodes for placing replicas.
</description>
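
Deployments or tests that still need the previous NetworkTopology-based behavior can flip the property back. A minimal sketch of doing so programmatically, mirroring the test change below; the class name is illustrative, and clusters would instead set the same property in hdfs-site.xml:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

final class TopologyOptOutSketch {
  // Illustrative: revert to the pre-HDFS-11998 behavior for a test or
  // client-side Configuration; "dfs.use.dfs.network.topology" is the same
  // property documented above in hdfs-default.xml.
  static Configuration withoutDfsNetworkTopology() {
    Configuration conf = new HdfsConfiguration();
    conf.setBoolean(DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
    return conf;
  }
}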

TestReplicationPolicyWithNodeGroup.java

@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_USE_DFS_NETWORK_TOPOLOGY_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -51,6 +52,9 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
@Override
DatanodeDescriptor[] getDatanodeDescriptors(Configuration conf) {
// The default is now true, which would make this test run against
// DFSNetworkTopology, but it must run on NetworkTopologyWithNodeGroup,
// so set the key to false here.
conf.setBoolean(DFS_USE_DFS_NETWORK_TOPOLOGY_KEY, false);
conf.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY,
NetworkTopologyWithNodeGroup.class.getName());
final String[] racks = {