From b104f3a282ab8e15314446e71189eb3a3b5dd9a6 Mon Sep 17 00:00:00 2001
From: Chris Douglas
Date: Mon, 20 Mar 2017 17:15:13 -0700
Subject: [PATCH] HDFS-6648. Order of namenodes in
 ConfiguredFailoverProxyProvider is undefined. Contributed by Inigo Goiri
---
 .../org/apache/hadoop/hdfs/DFSUtilClient.java     |  2 +-
 .../hadoop/hdfs/client/HdfsClientConfigKeys.java  |  2 ++
 .../ha/ConfiguredFailoverProxyProvider.java       | 15 ++++++++++++---
 3 files changed, 15 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index d2675301065..f9b2e8d7c8a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -366,7 +366,7 @@ static Map<String, Map<String, InetSocketAddress>> getAddressesForNsIds(
   static Map<String, InetSocketAddress> getAddressesForNameserviceId(
       Configuration conf, String nsId, String defaultValue, String... keys) {
     Collection<String> nnIds = getNameNodeIds(conf, nsId);
-    Map<String, InetSocketAddress> ret = Maps.newHashMap();
+    Map<String, InetSocketAddress> ret = Maps.newLinkedHashMap();
     for (String nnId : emptyAsSingletonNull(nnIds)) {
       String suffix = concatSuffixes(nsId, nnId);
       String address = getConfValue(defaultValue, suffix, conf, keys);
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 6f8c661a5ba..1a388061ceb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -264,6 +264,8 @@ interface Failover {
     String CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
         PREFIX + "connection.retries.on.timeouts";
     int CONNECTION_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 0;
+    String RANDOM_ORDER = PREFIX + "random.order";
+    boolean RANDOM_ORDER_DEFAULT = false;
   }
 
   /** dfs.client.write configuration properties */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index c2d4d916261..0e8fa448806 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -23,6 +23,7 @@
 import java.net.URI;
 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.List;
 import java.util.Map;
 import java.util.concurrent.atomic.AtomicBoolean;
@@ -43,9 +44,10 @@
 import com.google.common.base.Preconditions;
 
 /**
- * A FailoverProxyProvider implementation which allows one to configure two URIs
- * to connect to during fail-over. The first configured address is tried first,
- * and on a fail-over event the other address is tried.
+ * A FailoverProxyProvider implementation which allows one to configure
+ * multiple URIs to connect to during fail-over. A random configured address is
+ * tried first, and on a fail-over event the other addresses are tried
+ * sequentially in a random order.
  */
 public class ConfiguredFailoverProxyProvider<T> extends
     AbstractNNFailoverProxyProvider<T> {
@@ -124,6 +126,13 @@ public ConfiguredFailoverProxyProvider(Configuration conf, URI uri,
       for (InetSocketAddress address : addressesOfNns) {
         proxies.add(new AddressRpcProxyPair<T>(address));
       }
+      // Randomize the list to prevent all clients pointing to the same one
+      boolean randomized = conf.getBoolean(
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER,
+          HdfsClientConfigKeys.Failover.RANDOM_ORDER_DEFAULT);
+      if (randomized) {
+        Collections.shuffle(proxies);
+      }
 
       // The client may have a delegation token set for the logical
       // URI of the cluster. Clone this token to apply to each of the
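
Usage sketch: the snippet below is a hypothetical client program (not part of
this patch) showing how to opt in to the randomized ordering added above. It
assumes an HA nameservice is already configured via fs.defaultFS and
dfs.ha.namenodes.*; the only new ingredient is the
HdfsClientConfigKeys.Failover.RANDOM_ORDER key introduced in this change,
which resolves to the property name dfs.client.failover.random.order.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

    public class RandomOrderClientDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Opt in to shuffling the NameNode proxy list (default: false).
        // With the LinkedHashMap change above, leaving this off yields a
        // deterministic order matching the configuration.
        conf.setBoolean(HdfsClientConfigKeys.Failover.RANDOM_ORDER, true);
        // ConfiguredFailoverProxyProvider shuffles its proxy list once at
        // construction, so each client process picks its own first NameNode.
        try (FileSystem fs = FileSystem.get(conf)) {
          System.out.println(fs.exists(new Path("/")));
        }
      }
    }

The same flag can be set cluster-wide as dfs.client.failover.random.order in
hdfs-site.xml. Note the shuffle happens once per provider instance, not per
fail-over: a single client still walks its (shuffled) list sequentially on
each fail-over event, as the updated javadoc describes.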