HDFS-10544. Balancer doesn't work with IPFailoverProxyProvider.
parent 06c56ff79b
commit 087290e6b1
DFSUtil.java
@@ -718,10 +718,11 @@ public class DFSUtil {
 
   /**
    * Get a URI for each internal nameservice. If a nameservice is
-   * HA-enabled, then the logical URI of the nameservice is returned. If the
-   * nameservice is not HA-enabled, then a URI corresponding to an RPC address
-   * of the single NN for that nameservice is returned, preferring the service
-   * RPC address over the client RPC address.
+   * HA-enabled, and the configured failover proxy provider supports logical
+   * URIs, then the logical URI of the nameservice is returned.
+   * Otherwise, a URI corresponding to an RPC address of the single NN for that
+   * nameservice is returned, preferring the service RPC address over the
+   * client RPC address.
    *
    * @param conf configuration
    * @return a collection of all configured NN URIs, preferring service
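
For orientation, here is a minimal sketch of how a caller such as the Balancer might consume the URIs this javadoc describes. It assumes DFSUtil.getInternalNsRpcUris(Configuration) is reachable from the caller (the test changes below invoke it the same way); the class name is made up for illustration, and the output depends entirely on the loaded hdfs-site.xml.

import java.net.URI;
import java.util.Collection;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class PrintInternalNsUris {
  public static void main(String[] args) {
    // HdfsConfiguration loads hdfs-site.xml / core-site.xml from the classpath.
    Configuration conf = new HdfsConfiguration();

    // After this change: an HA nameservice whose failover proxy provider
    // supports logical URIs contributes "hdfs://<nsId>"; otherwise the RPC
    // address of its NameNode is used, preferring the service RPC address.
    Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf);
    for (URI uri : uris) {
      System.out.println(uri);
    }
  }
}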
@@ -735,9 +736,10 @@ public class DFSUtil {
 
   /**
    * Get a URI for each configured nameservice. If a nameservice is
-   * HA-enabled, then the logical URI of the nameservice is returned. If the
-   * nameservice is not HA-enabled, then a URI corresponding to the address of
-   * the single NN for that nameservice is returned.
+   * HA-enabled, and the configured failover proxy provider supports logical
+   * URIs, then the logical URI of the nameservice is returned.
+   * Otherwise, a URI corresponding to the address of the single NN for that
+   * nameservice is returned.
    *
    * @param conf configuration
    * @param keys configuration keys to try in order to get the URI for non-HA
@@ -756,13 +758,27 @@ public class DFSUtil {
     Set<URI> nonPreferredUris = new HashSet<URI>();
 
     for (String nsId : nameServices) {
-      if (HAUtil.isHAEnabled(conf, nsId)) {
-        // Add the logical URI of the nameservice.
-        try {
-          ret.add(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId));
-        } catch (URISyntaxException ue) {
-          throw new IllegalArgumentException(ue);
-        }
+      URI nsUri;
+      try {
+        nsUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId);
+      } catch (URISyntaxException ue) {
+        throw new IllegalArgumentException(ue);
+      }
+      /**
+       * Determine whether the logical URI of the name service can be resolved
+       * by the configured failover proxy provider. If not, we should try to
+       * resolve the URI here
+       */
+      boolean useLogicalUri = false;
+      try {
+        useLogicalUri = HAUtil.useLogicalUri(conf, nsUri);
+      } catch (IOException e) {
+        LOG.warn("Getting exception while trying to determine if nameservice "
+            + nsId + " can use logical URI: " + e);
+      }
+      if (HAUtil.isHAEnabled(conf, nsId) && useLogicalUri) {
+        // Add the logical URI of the nameservice.
+        ret.add(nsUri);
       } else {
         // Add the URI corresponding to the address of the NN.
         boolean uriFound = false;
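
The new branch above delegates the decision to HAUtil.useLogicalUri, which inspects the failover proxy provider configured under dfs.client.failover.proxy.provider.<nameservice>. Below is a hedged sketch of calling the same helper directly to see how a given nameservice will be treated; the class name is made up, and hdfs://ns1 is just the example nameservice used in the tests that follow.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class CheckLogicalUri {
  public static void main(String[] args) throws IOException {
    Configuration conf = new HdfsConfiguration();

    // Logical URI of the nameservice, built the same way as in the loop above.
    URI nsUri = URI.create("hdfs://ns1");

    // True for providers that resolve logical URIs themselves (for example
    // ConfiguredFailoverProxyProvider); false for IPFailoverProxyProvider,
    // which is why DFSUtil now falls back to a concrete RPC address.
    boolean useLogicalUri = HAUtil.useLogicalUri(conf, nsUri);
    System.out.println("ns1 resolves as a logical URI: " + useLogicalUri);
  }
}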
TestDFSUtil.java
@@ -62,6 +62,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -534,6 +535,10 @@ public class TestDFSUtil {
     assertEquals(null, DFSUtil.getNamenodeNameServiceId(conf));
     assertEquals(null, DFSUtil.getSecondaryNameServiceId(conf));
 
+    String proxyProviderKey = HdfsClientConfigKeys.Failover.
+        PROXY_PROVIDER_KEY_PREFIX + ".ns2";
+    conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha."
+        + "ConfiguredFailoverProxyProvider");
     Collection<URI> uris = getInternalNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY);
     assertEquals(2, uris.size());
     assertTrue(uris.contains(new URI("hdfs://ns1")));
@@ -633,6 +638,7 @@ public class TestDFSUtil {
   public void testGetNNUris() throws Exception {
     HdfsConfiguration conf = new HdfsConfiguration();
 
+    final String NS1_NN_ADDR = "ns1-nn.example.com:9820";
     final String NS1_NN1_ADDR = "ns1-nn1.example.com:9820";
     final String NS1_NN2_ADDR = "ns1-nn2.example.com:9820";
     final String NS2_NN_ADDR = "ns2-nn.example.com:9820";
@@ -664,6 +670,9 @@ public class TestDFSUtil {
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"), NS1_NN2_ADDR);
 
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns1"), NS1_NN_ADDR);
+
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, "ns2"), NS2_NN_ADDR);
 
@@ -671,6 +680,40 @@ public class TestDFSUtil {
 
     conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://" + NN2_ADDR);
 
+    /**
+     * {@link DFSUtil#getInternalNsRpcUris} decides whether to resolve a logical
+     * URI based on whether the failover proxy provider supports logical URIs.
+     * We will test both cases.
+     *
+     * First, configure ns1 to use {@link IPFailoverProxyProvider}, which doesn't
+     * support logical URIs. {@link DFSUtil#getInternalNsRpcUris} will then
+     * resolve the logical URI of ns1 based on the value configured at
+     * dfs.namenode.servicerpc-address.ns1, which is {@link NS1_NN_ADDR}.
+     */
+    String proxyProviderKey = HdfsClientConfigKeys.Failover.
+        PROXY_PROVIDER_KEY_PREFIX + ".ns1";
+    conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha."
+        + "IPFailoverProxyProvider");
+
+    uris = DFSUtil.getInternalNsRpcUris(conf);
+    assertEquals("Incorrect number of URIs returned", 3, uris.size());
+    assertTrue("Missing URI for RPC address",
+        uris.contains(new URI("hdfs://" + NN1_ADDR)));
+    assertTrue("Missing URI for name service ns1",
+        uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+            NS1_NN_ADDR)));
+    assertTrue("Missing URI for name service ns2",
+        uris.contains(new URI(HdfsConstants.HDFS_URI_SCHEME + "://" +
+            NS2_NN_ADDR)));
+
+    /**
+     * Second, test ns1 with {@link ConfiguredFailoverProxyProvider}, which does
+     * support logical URIs. Instead of {@link NS1_NN_ADDR}, the logical URI of
+     * ns1, hdfs://ns1, will be returned.
+     */
+    conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha."
+        + "ConfiguredFailoverProxyProvider");
+
     uris = DFSUtil.getInternalNsRpcUris(conf);
 
     assertEquals("Incorrect number of URIs returned", 3, uris.size());
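
To recap the two cases the test above exercises, here is a hedged, self-contained sketch that sets the same property the test sets programmatically (dfs.client.failover.proxy.provider.<nameservice>); the class name is made up, the host is the test's example host, and the expected URIs are taken from the test assertions, not from a separate run.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class ProxyProviderRecap {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();

    // Case 1: IPFailoverProxyProvider cannot resolve logical URIs, so
    // DFSUtil.getInternalNsRpcUris(conf) is expected to report ns1 by its
    // service RPC address, e.g. hdfs://ns1-nn.example.com:9820.
    conf.set("dfs.client.failover.proxy.provider.ns1",
        "org.apache.hadoop.hdfs.server.namenode.ha.IPFailoverProxyProvider");

    // Case 2: ConfiguredFailoverProxyProvider supports logical URIs, so
    // DFSUtil.getInternalNsRpcUris(conf) is expected to keep hdfs://ns1.
    conf.set("dfs.client.failover.proxy.provider.ns1",
        "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
  }
}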