HDFS-2582. Scope dfs.ha.namenodes config by nameservice. Contributed by Todd Lipcon.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1207738 13f79535-47bb-0310-9956-ffa450edef68
commit 9146ad23f3 (parent 73b3de6204)
@@ -23,3 +23,5 @@ HDFS-2393. Mark appropriate methods of ClientProtocol with the idempotent annota
 HDFS-2523. Small NN fixes to include HAServiceProtocol and prevent NPE on shutdown. (todd)
 
 HDFS-2577. NN fails to start since it tries to start secret manager in safemode. (todd)
+
+HDFS-2582. Scope dfs.ha.namenodes config by nameservice (todd)
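For reference, the configuration shape this change introduces: dfs.ha.namenodes is now read per nameservice (dfs.ha.namenodes.<nameserviceId>) instead of as a single global key, and the address lookups return a nested map keyed by nameservice and namenode ID. A minimal sketch, modelled on the testHANameNodesWithFederation test added later in this diff; the example class is hypothetical and the constant locations (DFSConfigKeys) are assumed from the patch's static imports:

    import java.net.InetSocketAddress;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Hypothetical example class, not part of this commit.
    public class ScopedHaNamenodesSketch {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // One federated nameservice "ns1" with two NameNodes.
        conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "ns1");
        // The NN IDs are now scoped by nameservice: dfs.ha.namenodes.ns1
        conf.set(DFSUtil.addKeySuffixes(DFSConfigKeys.DFS_HA_NAMENODES_KEY, "ns1"),
            "nn1,nn2");
        // RPC addresses are keyed by <nameservice>.<namenode ID>.
        conf.set(DFSUtil.addKeySuffixes(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn1"),
            "ns1-nn1.example.com:8020");
        conf.set(DFSUtil.addKeySuffixes(
            DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "nn2"),
            "ns1-nn2.example.com:8020");

        // Nested result: nameserviceId -> (namenodeId -> address).
        Map<String, Map<String, InetSocketAddress>> map =
            DFSUtil.getHaNnRpcAddresses(conf);
        System.out.println(DFSUtil.addressMapToString(map));
      }
    }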
@@ -24,10 +24,11 @@
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URISyntaxException;
-import java.util.ArrayList;
 import java.util.Collection;
+import java.util.Collections;
 import java.util.Comparator;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.StringTokenizer;
 
@@ -45,11 +46,14 @@
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 
+import com.google.common.base.Joiner;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
 @InterfaceAudience.Private
 public class DFSUtil {
   private DFSUtil() { /* Hidden constructor */ }
@@ -288,10 +292,22 @@ public static BlockLocation[] locatedBlocks2Locations(LocatedBlocks blocks) {
   /**
    * Returns collection of nameservice Ids from the configuration.
    * @param conf configuration
-   * @return collection of nameservice Ids
+   * @return collection of nameservice Ids, or null if not specified
    */
   public static Collection<String> getNameServiceIds(Configuration conf) {
-    return conf.getStringCollection(DFS_FEDERATION_NAMESERVICES);
+    return conf.getTrimmedStringCollection(DFS_FEDERATION_NAMESERVICES);
+  }
+
+  /**
+   * @return <code>coll</code> if it is non-null and non-empty. Otherwise,
+   * returns a list with a single null value.
+   */
+  private static Collection<String> emptyAsSingletonNull(Collection<String> coll) {
+    if (coll == null || coll.isEmpty()) {
+      return Collections.singletonList(null);
+    } else {
+      return coll;
+    }
   }
 
   /**
@@ -300,12 +316,14 @@ public static Collection<String> getNameServiceIds(Configuration conf) {
    * for each namenode in the in the HA setup.
    *
    * @param conf configuration
+   * @param nsId the nameservice ID to look at, or null for non-federated
    * @return collection of namenode Ids
    */
-  public static Collection<String> getNameNodeIds(Configuration conf) {
-    return conf.getStringCollection(DFS_HA_NAMENODES_KEY);
+  static Collection<String> getNameNodeIds(Configuration conf, String nsId) {
+    String key = addSuffix(DFS_HA_NAMENODES_KEY, nsId);
+    return conf.getTrimmedStringCollection(key);
   }
 
   /**
    * Given a list of keys in the order of preference, returns a value
    * for the key in the given order from the configuration.
@@ -333,13 +351,12 @@ private static String getConfValue(String defaultValue, String keySuffix,
 
   /** Add non empty and non null suffix to a key */
   private static String addSuffix(String key, String suffix) {
-    if (suffix == null || suffix.length() == 0) {
+    if (suffix == null || suffix.isEmpty()) {
       return key;
     }
-    if (!suffix.startsWith(".")) {
-      key += ".";
-    }
-    return key += suffix;
+    assert !suffix.startsWith(".") :
+      "suffix '" + suffix + "' should not already have '.' prepended.";
+    return key + "." + suffix;
   }
 
   /** Concatenate list of suffix strings '.' separated */
@@ -347,11 +364,7 @@ private static String concatSuffixes(String... suffixes) {
     if (suffixes == null) {
      return null;
    }
-    String ret = "";
-    for (int i = 0; i < suffixes.length - 1; i++) {
-      ret = addSuffix(ret, suffixes[i]);
-    }
-    return addSuffix(ret, suffixes[suffixes.length - 1]);
+    return Joiner.on(".").skipNulls().join(suffixes);
   }
 
   /**
@@ -363,69 +376,44 @@ public static String addKeySuffixes(String key, String... suffixes) {
   }
 
   /**
-   * Returns list of InetSocketAddress for a given set of keys.
+   * Returns the configured address for all NameNodes in the cluster.
    * @param conf configuration
-   * @param defaultAddress default address to return in case key is not found
+   * @param defaultAddress default address to return in case key is not found.
    * @param keys Set of keys to look for in the order of preference
-   * @return list of InetSocketAddress corresponding to the key
+   * @return a map(nameserviceId to map(namenodeId to InetSocketAddress))
    */
-  private static List<InetSocketAddress> getAddresses(Configuration conf,
+  private static Map<String, Map<String, InetSocketAddress>>
+    getAddresses(Configuration conf,
       String defaultAddress, String... keys) {
     Collection<String> nameserviceIds = getNameServiceIds(conf);
-    Collection<String> namenodeIds = getNameNodeIds(conf);
-    List<InetSocketAddress> isas = new ArrayList<InetSocketAddress>();
+
+    // Look for configurations of the form <key>[.<nameserviceId>][.<namenodeId>]
+    // across all of the configured nameservices and namenodes.
+    Map<String, Map<String, InetSocketAddress>> ret = Maps.newHashMap();
+    for (String nsId : emptyAsSingletonNull(nameserviceIds)) {
+      Map<String, InetSocketAddress> isas =
+        getAddressesForNameserviceId(conf, nsId, defaultAddress, keys);
+      if (!isas.isEmpty()) {
+        ret.put(nsId, isas);
+      }
+    }
+    return ret;
+  }
 
-    final boolean federationEnabled = nameserviceIds != null
-        && !nameserviceIds.isEmpty();
-    final boolean haEnabled = namenodeIds != null
-        && !namenodeIds.isEmpty();
-
-    // Configuration with no federation and ha, return default address
-    if (!federationEnabled && !haEnabled) {
-      String address = getConfValue(defaultAddress, null, conf, keys);
-      if (address == null) {
-        return null;
-      }
-      isas.add(NetUtils.createSocketAddr(address));
-      return isas;
-    }
-
-    if (!federationEnabled) {
-      nameserviceIds = new ArrayList<String>();
-      nameserviceIds.add(null);
-    }
-    if (!haEnabled) {
-      namenodeIds = new ArrayList<String>();
-      namenodeIds.add(null);
-    }
-
-    // Get configuration suffixed with nameserviceId and/or namenodeId
-    if (federationEnabled && haEnabled) {
-      for (String nameserviceId : nameserviceIds) {
-        for (String nnId : namenodeIds) {
-          String keySuffix = concatSuffixes(nameserviceId, nnId);
-          String address = getConfValue(null, keySuffix, conf, keys);
-          if (address != null) {
-            isas.add(NetUtils.createSocketAddr(address));
-          }
-        }
-      }
-    } else if (!federationEnabled && haEnabled) {
-      for (String nnId : namenodeIds) {
-        String address = getConfValue(null, nnId, conf, keys);
-        if (address != null) {
-          isas.add(NetUtils.createSocketAddr(address));
-        }
-      }
-    } else if (federationEnabled && !haEnabled) {
-      for (String nameserviceId : nameserviceIds) {
-        String address = getConfValue(null, nameserviceId, conf, keys);
-        if (address != null) {
-          isas.add(NetUtils.createSocketAddr(address));
-        }
+  private static Map<String, InetSocketAddress> getAddressesForNameserviceId(
+      Configuration conf, String nsId, String defaultValue,
+      String[] keys) {
+    Collection<String> nnIds = getNameNodeIds(conf, nsId);
+    Map<String, InetSocketAddress> ret = Maps.newHashMap();
+    for (String nnId : emptyAsSingletonNull(nnIds)) {
+      String suffix = concatSuffixes(nsId, nnId);
+      String address = getConfValue(defaultValue, suffix, conf, keys);
+      if (address != null) {
+        InetSocketAddress isa = NetUtils.createSocketAddr(address);
+        ret.put(nnId, isa);
       }
     }
-    return isas;
+    return ret;
   }
 
   /**
@@ -436,15 +424,9 @@ private static List<InetSocketAddress> getAddresses(Configuration conf,
    * @return list of InetSocketAddresses
    * @throws IOException if no addresses are configured
    */
-  public static List<InetSocketAddress> getHaNnRpcAddresses(
-      Configuration conf) throws IOException {
-    List<InetSocketAddress> addressList = getAddresses(conf, null,
-        DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
-    if (addressList == null) {
-      throw new IOException("Incorrect configuration: HA name node addresses "
-          + DFS_NAMENODE_RPC_ADDRESS_KEY + " is not configured.");
-    }
-    return addressList;
+  public static Map<String, Map<String, InetSocketAddress>> getHaNnRpcAddresses(
+      Configuration conf) {
+    return getAddresses(conf, null, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY);
   }
 
   /**
@@ -455,11 +437,11 @@ public static List<InetSocketAddress> getHaNnRpcAddresses(
    * @return list of InetSocketAddresses
    * @throws IOException on error
    */
-  public static List<InetSocketAddress> getBackupNodeAddresses(
+  public static Map<String, Map<String, InetSocketAddress>> getBackupNodeAddresses(
      Configuration conf) throws IOException {
-    List<InetSocketAddress> addressList = getAddresses(conf,
+    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf,
        null, DFS_NAMENODE_BACKUP_ADDRESS_KEY);
-    if (addressList == null) {
+    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: backup node address "
          + DFS_NAMENODE_BACKUP_ADDRESS_KEY + " is not configured.");
    }
@@ -474,11 +456,11 @@ public static List<InetSocketAddress> getBackupNodeAddresses(
    * @return list of InetSocketAddresses
    * @throws IOException on error
    */
-  public static List<InetSocketAddress> getSecondaryNameNodeAddresses(
+  public static Map<String, Map<String, InetSocketAddress>> getSecondaryNameNodeAddresses(
      Configuration conf) throws IOException {
-    List<InetSocketAddress> addressList = getAddresses(conf, null,
+    Map<String, Map<String, InetSocketAddress>> addressList = getAddresses(conf, null,
        DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
-    if (addressList == null) {
+    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: secondary namenode address "
          + DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY + " is not configured.");
    }
@@ -498,7 +480,7 @@ public static List<InetSocketAddress> getSecondaryNameNodeAddresses(
    * @return list of InetSocketAddress
    * @throws IOException on error
    */
-  public static List<InetSocketAddress> getNNServiceRpcAddresses(
+  public static Map<String, Map<String, InetSocketAddress>> getNNServiceRpcAddresses(
      Configuration conf) throws IOException {
    // Use default address as fall back
    String defaultAddress;
@@ -508,9 +490,10 @@ public static List<InetSocketAddress> getNNServiceRpcAddresses(
      defaultAddress = null;
    }
 
-    List<InetSocketAddress> addressList = getAddresses(conf, defaultAddress,
+    Map<String, Map<String, InetSocketAddress>> addressList =
+      getAddresses(conf, defaultAddress,
        DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY);
-    if (addressList == null) {
+    if (addressList.isEmpty()) {
      throw new IOException("Incorrect configuration: namenode address "
          + DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY + " or "
          + DFS_NAMENODE_RPC_ADDRESS_KEY
@@ -519,6 +502,77 @@ public static List<InetSocketAddress> getNNServiceRpcAddresses(
     return addressList;
   }
 
+  /**
+   * Flatten the given map, as returned by other functions in this class,
+   * into a flat list of {@link ConfiguredNNAddress} instances.
+   */
+  public static List<ConfiguredNNAddress> flattenAddressMap(
+      Map<String, Map<String, InetSocketAddress>> map) {
+    List<ConfiguredNNAddress> ret = Lists.newArrayList();
+
+    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
+         map.entrySet()) {
+      String nsId = entry.getKey();
+      Map<String, InetSocketAddress> nnMap = entry.getValue();
+      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
+        String nnId = e2.getKey();
+        InetSocketAddress addr = e2.getValue();
+
+        ret.add(new ConfiguredNNAddress(nsId, nnId, addr));
+      }
+    }
+    return ret;
+  }
+
+  /**
+   * Format the given map, as returned by other functions in this class,
+   * into a string suitable for debugging display. The format of this string
+   * should not be considered an interface, and is liable to change.
+   */
+  public static String addressMapToString(
+      Map<String, Map<String, InetSocketAddress>> map) {
+    StringBuilder b = new StringBuilder();
+    for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
+         map.entrySet()) {
+      String nsId = entry.getKey();
+      Map<String, InetSocketAddress> nnMap = entry.getValue();
+      b.append("Nameservice <").append(nsId).append(">:").append("\n");
+      for (Map.Entry<String, InetSocketAddress> e2 : nnMap.entrySet()) {
+        b.append(" NN ID ").append(e2.getKey())
+          .append(" => ").append(e2.getValue()).append("\n");
+      }
+    }
+    return b.toString();
+  }
+
+  /**
+   * Represent one of the NameNodes configured in the cluster.
+   */
+  public static class ConfiguredNNAddress {
+    private final String nameserviceId;
+    private final String namenodeId;
+    private final InetSocketAddress addr;
+
+    private ConfiguredNNAddress(String nameserviceId, String namenodeId,
+        InetSocketAddress addr) {
+      this.nameserviceId = nameserviceId;
+      this.namenodeId = namenodeId;
+      this.addr = addr;
+    }
+
+    public String getNameserviceId() {
+      return nameserviceId;
+    }
+
+    public String getNamenodeId() {
+      return namenodeId;
+    }
+
+    public InetSocketAddress getAddress() {
+      return addr;
+    }
+  }
+
   /**
    * Given the InetSocketAddress this method returns the nameservice Id
    * corresponding to the key with matching address, by doing a reverse
@@ -545,11 +599,8 @@ public static List<InetSocketAddress> getNNServiceRpcAddresses(
   public static String getNameServiceIdFromAddress(final Configuration conf,
       final InetSocketAddress address, String... keys) {
     // Configuration with a single namenode and no nameserviceId
-    if (!isFederationEnabled(conf)) {
-      return null;
-    }
     String[] ids = getSuffixIDs(conf, address, keys);
-    return (ids != null && ids.length > 0) ? ids[0] : null;
+    return (ids != null) ? ids[0] : null;
   }
 
   /**
@@ -715,14 +766,6 @@ public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
         ClientDatanodeProtocolTranslatorR23(datanodeid, conf, socketTimeout,
             locatedBlock);
   }
 
-  /**
-   * Returns true if federation configuration is enabled
-   */
-  public static boolean isFederationEnabled(Configuration conf) {
-    Collection<String> collection = getNameServiceIds(conf);
-    return collection != null && collection.size() != 0;
-  }
-
   /** Create {@link ClientDatanodeProtocol} proxy using kerberos ticket */
   static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
@@ -783,16 +826,9 @@ private static String getNameServiceId(Configuration conf, String addressKey) {
     if (nameserviceId != null) {
       return nameserviceId;
     }
-    if (!isFederationEnabled(conf)) {
-      return null;
-    }
-    nameserviceId = getSuffixIDs(conf, addressKey, LOCAL_ADDRESS_MATCHER)[0];
-    if (nameserviceId == null) {
-      String msg = "Configuration " + addressKey + " must be suffixed with" +
-          " nameserviceId for federation configuration.";
-      throw new HadoopIllegalArgumentException(msg);
-    }
-    return nameserviceId;
+    String nnId = conf.get(DFS_HA_NAMENODE_ID_KEY);
+
+    return getSuffixIDs(conf, addressKey, null, nnId, LOCAL_ADDRESS_MATCHER)[0];
   }
 
   /**
@@ -801,6 +837,8 @@ private static String getNameServiceId(Configuration conf, String addressKey) {
    *
    * @param conf Configuration
    * @param addressKey configuration key corresponding to the address.
+   * @param knownNsId only look at configs for the given nameservice, if not-null
+   * @param knownNNId only look at configs for the given namenode, if not null
    * @param matcher matching criteria for matching the address
    * @return Array with nameservice Id and namenode Id on success. First element
    * in the array is nameservice Id and second element is namenode Id.
@@ -809,29 +847,23 @@ private static String getNameServiceId(Configuration conf, String addressKey) {
    * @throws HadoopIllegalArgumentException on error
    */
   static String[] getSuffixIDs(final Configuration conf, final String addressKey,
+      String knownNsId, String knownNNId,
       final AddressMatcher matcher) {
-    Collection<String> nsIds = getNameServiceIds(conf);
-    boolean federationEnabled = true;
-    if (nsIds == null || nsIds.size() == 0) {
-      federationEnabled = false; // federation not configured
-      nsIds = new ArrayList<String>();
-      nsIds.add(null);
-    }
-
-    boolean haEnabled = true;
-    Collection<String> nnIds = getNameNodeIds(conf);
-    if (nnIds == null || nnIds.size() == 0) {
-      haEnabled = false; // HA not configured
-      nnIds = new ArrayList<String>();
-      nnIds.add(null);
-    }
-
-    // Match the address from addressKey.nsId.nnId based on the given matcher
     String nameserviceId = null;
     String namenodeId = null;
     int found = 0;
-    for (String nsId : nsIds) {
-      for (String nnId : nnIds) {
+
+    Collection<String> nsIds = getNameServiceIds(conf);
+    for (String nsId : emptyAsSingletonNull(nsIds)) {
+      if (knownNsId != null && !knownNsId.equals(nsId)) {
+        continue;
+      }
+
+      Collection<String> nnIds = getNameNodeIds(conf, nsId);
+      for (String nnId : emptyAsSingletonNull(nnIds)) {
+        if (knownNNId != null && !knownNNId.equals(nnId)) {
+          continue;
+        }
         String key = addKeySuffixes(addressKey, nsId, nnId);
         String addr = conf.get(key);
         InetSocketAddress s = null;
@@ -850,8 +882,8 @@ static String[] getSuffixIDs(final Configuration conf, final String addressKey,
     if (found > 1) { // Only one address must match the local address
       String msg = "Configuration has multiple addresses that match "
           + "local node's address. Please configure the system with "
-          + (federationEnabled ? DFS_FEDERATION_NAMESERVICE_ID : "")
-          + (haEnabled ? (" and " + DFS_HA_NAMENODE_ID_KEY) : "");
+          + DFS_FEDERATION_NAMESERVICE_ID + " and "
+          + DFS_HA_NAMENODE_ID_KEY;
       throw new HadoopIllegalArgumentException(msg);
     }
     return new String[] { nameserviceId, namenodeId };
@@ -872,7 +904,7 @@ public boolean match(InetSocketAddress s) {
     };
 
     for (String key : keys) {
-      String[] ids = getSuffixIDs(conf, key, matcher);
+      String[] ids = getSuffixIDs(conf, key, null, null, matcher);
       if (ids != null && (ids [0] != null || ids[1] != null)) {
         return ids;
       }
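Subsequent hunks update HAUtil and the other callers of these DFSUtil methods; the common pattern at the call sites is to flatten the nested map back into a list, as the DataNode, ClusterJspHelper and GetConf changes below do. A small consumer-side sketch under the same assumptions (hypothetical class, not part of this commit; it expects an hdfs-site.xml with NameNode addresses on the classpath):

    import java.net.InetSocketAddress;
    import java.util.Map;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Hypothetical example class, not part of this commit.
    public class FlattenAddressMapSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // nameserviceId -> (namenodeId -> service RPC address); throws IOException
        // if no NameNode address is configured at all.
        Map<String, Map<String, InetSocketAddress>> map =
            DFSUtil.getNNServiceRpcAddresses(conf);
        for (ConfiguredNNAddress cnn : DFSUtil.flattenAddressMap(map)) {
          System.out.println(cnn.getNameserviceId() + " / " + cnn.getNamenodeId()
              + " => " + cnn.getAddress());
        }
      }
    }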
@@ -21,6 +21,7 @@
 
 import java.net.InetSocketAddress;
 import java.util.Collection;
+import java.util.Map;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.conf.Configuration;
@@ -29,14 +30,18 @@ public class HAUtil {
   private HAUtil() { /* Hidden constructor */ }
 
   /**
-   * Returns true if HA for namenode is configured.
+   * Returns true if HA for namenode is configured for the given nameservice
    *
    * @param conf Configuration
+   * @param nsId nameservice, or null if no federated NS is configured
    * @return true if HA is configured in the configuration; else false.
    */
-  public static boolean isHAEnabled(Configuration conf) {
-    Collection<String> collection = DFSUtil.getNameNodeIds(conf);
-    return collection != null && !collection.isEmpty();
+  public static boolean isHAEnabled(Configuration conf, String nsId) {
+    Map<String, Map<String, InetSocketAddress>> addresses =
+      DFSUtil.getHaNnRpcAddresses(conf);
+    if (addresses == null) return false;
+    Map<String, InetSocketAddress> nnMap = addresses.get(nsId);
+    return nnMap != null && nnMap.size() > 1;
   }
 
   /**
@@ -52,22 +57,21 @@ public static boolean isHAEnabled(Configuration conf) {
    * @return namenode Id on success, null on failure.
    * @throws HadoopIllegalArgumentException on error
    */
-  public static String getNameNodeId(Configuration conf) {
-    String namenodeId = conf.get(DFS_HA_NAMENODE_ID_KEY);
+  public static String getNameNodeId(Configuration conf, String nsId) {
+    String namenodeId = conf.getTrimmed(DFS_HA_NAMENODE_ID_KEY);
     if (namenodeId != null) {
       return namenodeId;
     }
-    if (!isHAEnabled(conf)) {
-      return null;
-    }
-    namenodeId = DFSUtil.getSuffixIDs(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
-        DFSUtil.LOCAL_ADDRESS_MATCHER)[1];
-    if (namenodeId == null) {
+
+    String suffixes[] = DFSUtil.getSuffixIDs(conf, DFS_NAMENODE_RPC_ADDRESS_KEY,
+        nsId, null, DFSUtil.LOCAL_ADDRESS_MATCHER);
+    if (suffixes == null) {
       String msg = "Configuration " + DFS_NAMENODE_RPC_ADDRESS_KEY +
           " must be suffixed with" + namenodeId + " for HA configuration.";
       throw new HadoopIllegalArgumentException(msg);
     }
-    return namenodeId;
+
+    return suffixes[1];
   }
 
   /**
@@ -78,14 +82,11 @@ public static String getNameNodeId(Configuration conf) {
   public static String getNameNodeIdFromAddress(final Configuration conf,
       final InetSocketAddress address, String... keys) {
     // Configuration with a single namenode and no nameserviceId
-    if (!isHAEnabled(conf)) {
-      return null;
-    }
-
     String[] ids = DFSUtil.getSuffixIDs(conf, address, keys);
     if (ids != null && ids.length > 1) {
       return ids[1];
     }
     return null;
   }
 
 }
@@ -39,6 +39,7 @@
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -1379,7 +1380,8 @@ private ReturnStatus run(int iteration, Formatter formatter) {
    * for each namenode,
    * execute a {@link Balancer} to work through all datanodes once.
    */
-  static int run(List<InetSocketAddress> namenodes, final Parameters p,
+  static int run(Map<String, Map<String, InetSocketAddress>> namenodes,
+      final Parameters p,
       Configuration conf) throws IOException, InterruptedException {
     final long sleeptime = 2000*conf.getLong(
         DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY,
@@ -1393,8 +1395,10 @@ static int run(List<InetSocketAddress> namenodes, final Parameters p,
     final List<NameNodeConnector> connectors
         = new ArrayList<NameNodeConnector>(namenodes.size());
     try {
-      for(InetSocketAddress isa : namenodes) {
-        connectors.add(new NameNodeConnector(isa, conf));
+      for(Entry<String, Map<String, InetSocketAddress>> entry :
+          namenodes.entrySet()) {
+        connectors.add(
+            new NameNodeConnector(entry.getValue().values(), conf));
       }
 
       boolean done = false;
@@ -1476,7 +1480,8 @@ public int run(String[] args) {
       try {
         checkReplicationPolicyCompatibility(conf);
 
-        final List<InetSocketAddress> namenodes = DFSUtil.getNNServiceRpcAddresses(conf);
+        final Map<String, Map<String, InetSocketAddress>> namenodes =
+          DFSUtil.getNNServiceRpcAddresses(conf);
         return Balancer.run(namenodes, parse(args), conf);
       } catch (IOException e) {
         System.out.println(e + ". Exiting ...");
@@ -22,6 +22,8 @@
 import java.io.OutputStream;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.Map;
@@ -53,6 +55,9 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 
+import com.google.common.collect.Collections2;
+import com.google.common.collect.Lists;
+
 /**
  * The class provides utilities for {@link Balancer} to access a NameNode
  */
@@ -75,12 +80,14 @@ class NameNodeConnector {
   private BlockTokenSecretManager blockTokenSecretManager;
   private Daemon keyupdaterthread; // AccessKeyUpdater thread
 
-  NameNodeConnector(InetSocketAddress namenodeAddress, Configuration conf
-      ) throws IOException {
-    this.namenodeAddress = namenodeAddress;
-    this.namenode = createNamenode(namenodeAddress, conf);
+  NameNodeConnector(Collection<InetSocketAddress> haNNs,
+      Configuration conf) throws IOException {
+    InetSocketAddress nn = Lists.newArrayList(haNNs).get(0);
+    // TODO(HA): need to deal with connecting to HA NN pair here
+    this.namenodeAddress = nn;
+    this.namenode = createNamenode(nn, conf);
     this.client = DFSUtil.createNamenode(conf);
-    this.fs = FileSystem.get(NameNode.getUri(namenodeAddress), conf);
+    this.fs = FileSystem.get(NameNode.getUri(nn), conf);
 
     final NamespaceInfo namespaceinfo = namenode.versionRequest();
     this.blockpoolID = namespaceinfo.getBlockPoolID();
@@ -77,6 +77,7 @@
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Map.Entry;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -92,6 +93,7 @@
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.HDFSPolicyProvider;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -168,6 +170,8 @@
 import org.mortbay.util.ajax.JSON;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
+import com.google.common.collect.Sets;
 
 
 /**********************************************************
@@ -251,8 +255,14 @@ class BlockPoolManager {
       bpMapping = new HashMap<String, BPOfferService>();
       nameNodeThreads = new HashMap<InetSocketAddress, BPOfferService>();
 
-      List<InetSocketAddress> isas = DFSUtil.getNNServiceRpcAddresses(conf);
-      for(InetSocketAddress isa : isas) {
+      Map<String, Map<String, InetSocketAddress>> map =
+        DFSUtil.getNNServiceRpcAddresses(conf);
+      for (Entry<String, Map<String, InetSocketAddress>> entry :
+           map.entrySet()) {
+        List<InetSocketAddress> nnList = Lists.newArrayList(entry.getValue().values());
+        // TODO(HA) when HDFS-1971 (dual BRs) is done, pass all of the NNs
+        // to BPOS
+        InetSocketAddress isa = nnList.get(0);
         BPOfferService bpos = new BPOfferService(isa, DataNode.this);
         nameNodeThreads.put(bpos.getNNSocketAddress(), bpos);
       }
@@ -333,8 +343,16 @@ void refreshNamenodes(Configuration conf)
       throws IOException {
     LOG.info("Refresh request received for nameservices: "
         + conf.get(DFS_FEDERATION_NAMESERVICES));
-    List<InetSocketAddress> newAddresses =
+
+    // TODO(HA): need to update this for multiple NNs per nameservice
+    // For now, just list all of the NNs into this set
+    Map<String, Map<String, InetSocketAddress>> newAddressMap =
         DFSUtil.getNNServiceRpcAddresses(conf);
+    Set<InetSocketAddress> newAddresses = Sets.newHashSet();
+    for (ConfiguredNNAddress cnn : DFSUtil.flattenAddressMap(newAddressMap)) {
+      newAddresses.add(cnn.getAddress());
+    }
+
     List<BPOfferService> toShutdown = new ArrayList<BPOfferService>();
     List<InetSocketAddress> toStart = new ArrayList<InetSocketAddress>();
     synchronized (refreshNamenodesLock) {
@@ -39,6 +39,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.JsonNode;
@@ -66,9 +67,10 @@ class ClusterJspHelper {
   ClusterStatus generateClusterHealthReport() {
     ClusterStatus cs = new ClusterStatus();
     Configuration conf = new Configuration();
-    List<InetSocketAddress> isas = null;
+    List<ConfiguredNNAddress> nns = null;
     try {
-      isas = DFSUtil.getNNServiceRpcAddresses(conf);
+      nns = DFSUtil.flattenAddressMap(
+          DFSUtil.getNNServiceRpcAddresses(conf));
     } catch (Exception e) {
       // Could not build cluster status
       cs.setError(e);
@@ -76,7 +78,8 @@ ClusterStatus generateClusterHealthReport() {
     }
 
     // Process each namenode and add it to ClusterStatus
-    for (InetSocketAddress isa : isas) {
+    for (ConfiguredNNAddress cnn : nns) {
+      InetSocketAddress isa = cnn.getAddress();
       NamenodeMXBeanHelper nnHelper = null;
       try {
         nnHelper = new NamenodeMXBeanHelper(isa, conf);
@@ -102,9 +105,10 @@ ClusterStatus generateClusterHealthReport() {
   DecommissionStatus generateDecommissioningReport() {
     String clusterid = "";
     Configuration conf = new Configuration();
-    List<InetSocketAddress> isas = null;
+    List<ConfiguredNNAddress> cnns = null;
     try {
-      isas = DFSUtil.getNNServiceRpcAddresses(conf);
+      cnns = DFSUtil.flattenAddressMap(
+          DFSUtil.getNNServiceRpcAddresses(conf));
     } catch (Exception e) {
       // catch any exception encountered other than connecting to namenodes
       DecommissionStatus dInfo = new DecommissionStatus(clusterid, e);
@@ -122,7 +126,8 @@ DecommissionStatus generateDecommissioningReport() {
         new HashMap<String, Exception>();
 
     List<String> unreportedNamenode = new ArrayList<String>();
-    for (InetSocketAddress isa : isas) {
+    for (ConfiguredNNAddress cnn : cnns) {
+      InetSocketAddress isa = cnn.getAddress();
       NamenodeMXBeanHelper nnHelper = null;
       try {
         nnHelper = new NamenodeMXBeanHelper(isa, conf);
@@ -527,10 +527,11 @@ protected NameNode(Configuration conf, NamenodeRole role)
       throws IOException {
     this.conf = conf;
     this.role = role;
-    this.haEnabled = HAUtil.isHAEnabled(conf);
+    String nsId = getNameServiceId(conf);
+    this.haEnabled = HAUtil.isHAEnabled(conf, nsId);
     this.haContext = new NameNodeHAContext();
     try {
-      initializeGenericKeys(conf, getNameServiceId(conf));
+      initializeGenericKeys(conf, nsId);
       initialize(conf);
       if (!haEnabled) {
         state = ACTIVE_STATE;
@@ -848,7 +849,7 @@ public static NameNode createNameNode(String argv[], Configuration conf)
    */
   public static void initializeGenericKeys(Configuration conf, String
       nameserviceId) {
-    String namenodeId = HAUtil.getNameNodeId(conf);
+    String namenodeId = HAUtil.getNameNodeId(conf, nameserviceId);
     if ((nameserviceId == null || nameserviceId.isEmpty()) &&
         (namenodeId == null || namenodeId.isEmpty())) {
       return;
@@ -22,6 +22,7 @@
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -89,9 +90,14 @@ public synchronized void setConf(Configuration conf) {
     try {
       ugi = UserGroupInformation.getCurrentUser();
 
-      Collection<InetSocketAddress> addresses = DFSUtil.getHaNnRpcAddresses(
+      Map<String, Map<String, InetSocketAddress>> map = DFSUtil.getHaNnRpcAddresses(
          conf);
-      for (InetSocketAddress address : addresses) {
+      // TODO(HA): currently hardcoding the nameservice used by MiniDFSCluster.
+      // We need to somehow communicate this into the proxy provider.
+      String nsId = "nameserviceId1";
+      Map<String, InetSocketAddress> addressesInNN = map.get(nsId);
+
+      for (InetSocketAddress address : addressesInNN.values()) {
        proxies.add(new AddressRpcProxyPair(address));
      }
    } catch (IOException e) {
@@ -21,12 +21,15 @@
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
+import java.util.Collection;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
@@ -155,7 +158,7 @@ int doWorkInternal(GetConf tool) throws Exception {
   static class NameNodesCommandHandler extends CommandHandler {
     @Override
     int doWorkInternal(GetConf tool) throws IOException {
-      tool.printList(DFSUtil.getNNServiceRpcAddresses(tool.getConf()));
+      tool.printMap(DFSUtil.getNNServiceRpcAddresses(tool.getConf()));
       return 0;
     }
   }
@@ -166,7 +169,7 @@ int doWorkInternal(GetConf tool) throws IOException {
   static class BackupNodesCommandHandler extends CommandHandler {
     @Override
     public int doWorkInternal(GetConf tool) throws IOException {
-      tool.printList(DFSUtil.getBackupNodeAddresses(tool.getConf()));
+      tool.printMap(DFSUtil.getBackupNodeAddresses(tool.getConf()));
      return 0;
    }
  }
@@ -177,7 +180,7 @@ public int doWorkInternal(GetConf tool) throws IOException {
   static class SecondaryNameNodesCommandHandler extends CommandHandler {
     @Override
     public int doWorkInternal(GetConf tool) throws IOException {
-      tool.printList(DFSUtil.getSecondaryNameNodeAddresses(tool.getConf()));
+      tool.printMap(DFSUtil.getSecondaryNameNodeAddresses(tool.getConf()));
      return 0;
    }
  }
@@ -191,9 +194,11 @@ static class NNRpcAddressesCommandHandler extends CommandHandler {
     @Override
     public int doWorkInternal(GetConf tool) throws IOException {
       Configuration config = tool.getConf();
-      List<InetSocketAddress> rpclist = DFSUtil.getNNServiceRpcAddresses(config);
-      if (rpclist != null) {
-        for (InetSocketAddress rpc : rpclist) {
+      List<ConfiguredNNAddress> cnnlist = DFSUtil.flattenAddressMap(
+          DFSUtil.getNNServiceRpcAddresses(config));
+      if (!cnnlist.isEmpty()) {
+        for (ConfiguredNNAddress cnn : cnnlist) {
+          InetSocketAddress rpc = cnn.getAddress();
           tool.printOut(rpc.getHostName()+":"+rpc.getPort());
         }
         return 0;
@@ -223,10 +228,13 @@ void printError(String message) {
     void printOut(String message) {
       out.println(message);
     }
 
-    void printList(List<InetSocketAddress> list) {
+    void printMap(Map<String, Map<String, InetSocketAddress>> map) {
       StringBuilder buffer = new StringBuilder();
-      for (InetSocketAddress address : list) {
+
+      List<ConfiguredNNAddress> cnns = DFSUtil.flattenAddressMap(map);
+      for (ConfiguredNNAddress cnn : cnns) {
+        InetSocketAddress address = cnn.getAddress();
         if (buffer.length() > 0) {
           buffer.append(" ");
         }
@ -28,6 +28,7 @@
|
|||||||
import java.util.Collection;
|
import java.util.Collection;
|
||||||
import java.util.Iterator;
|
import java.util.Iterator;
|
||||||
import java.util.List;
|
import java.util.List;
|
||||||
|
import java.util.Map;
|
||||||
|
|
||||||
import org.apache.hadoop.HadoopIllegalArgumentException;
|
import org.apache.hadoop.HadoopIllegalArgumentException;
|
||||||
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
||||||
@ -183,14 +184,19 @@ public void testMultipleNamenodes() throws IOException {
|
|||||||
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
|
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"),
|
||||||
NN2_ADDRESS);
|
NN2_ADDRESS);
|
||||||
|
|
||||||
Collection<InetSocketAddress> nnAddresses = DFSUtil
|
Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil
|
||||||
.getNNServiceRpcAddresses(conf);
|
.getNNServiceRpcAddresses(conf);
|
||||||
assertEquals(2, nnAddresses.size());
|
assertEquals(2, nnMap.size());
|
||||||
Iterator<InetSocketAddress> iterator = nnAddresses.iterator();
|
|
||||||
InetSocketAddress addr = iterator.next();
|
Map<String, InetSocketAddress> nn1Map = nnMap.get("nn1");
|
||||||
|
assertEquals(1, nn1Map.size());
|
||||||
|
InetSocketAddress addr = nn1Map.get(null);
|
||||||
assertEquals("localhost", addr.getHostName());
|
assertEquals("localhost", addr.getHostName());
|
||||||
assertEquals(9000, addr.getPort());
|
assertEquals(9000, addr.getPort());
|
||||||
addr = iterator.next();
|
|
||||||
|
Map<String, InetSocketAddress> nn2Map = nnMap.get("nn2");
|
||||||
|
assertEquals(1, nn2Map.size());
|
||||||
|
addr = nn2Map.get(null);
|
||||||
assertEquals("localhost", addr.getHostName());
|
assertEquals("localhost", addr.getHostName());
|
||||||
assertEquals(9001, addr.getPort());
|
assertEquals(9001, addr.getPort());
|
||||||
|
|
||||||
@ -237,9 +243,14 @@ public void testDefaultNamenode() throws IOException {
|
|||||||
conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
|
conf.set(FS_DEFAULT_NAME_KEY, hdfs_default);
|
||||||
// If DFS_FEDERATION_NAMESERVICES is not set, verify that
|
// If DFS_FEDERATION_NAMESERVICES is not set, verify that
|
||||||
// default namenode address is returned.
|
// default namenode address is returned.
|
||||||
List<InetSocketAddress> addrList = DFSUtil.getNNServiceRpcAddresses(conf);
|
Map<String, Map<String, InetSocketAddress>> addrMap =
|
||||||
assertEquals(1, addrList.size());
|
DFSUtil.getNNServiceRpcAddresses(conf);
|
||||||
assertEquals(9999, addrList.get(0).getPort());
|
assertEquals(1, addrMap.size());
|
||||||
|
|
||||||
|
Map<String, InetSocketAddress> defaultNsMap = addrMap.get(null);
|
||||||
|
assertEquals(1, defaultNsMap.size());
|
||||||
|
|
||||||
|
assertEquals(9999, defaultNsMap.get(null).getPort());
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
@ -279,22 +290,28 @@ public void testConfModification() {
|
|||||||
public void testEmptyConf() {
|
public void testEmptyConf() {
|
||||||
HdfsConfiguration conf = new HdfsConfiguration(false);
|
HdfsConfiguration conf = new HdfsConfiguration(false);
|
||||||
try {
|
try {
|
||||||
DFSUtil.getNNServiceRpcAddresses(conf);
|
Map<String, Map<String, InetSocketAddress>> map =
|
||||||
fail("Expected IOException is not thrown");
|
DFSUtil.getNNServiceRpcAddresses(conf);
|
||||||
|
fail("Expected IOException is not thrown, result was: " +
|
||||||
|
DFSUtil.addressMapToString(map));
|
||||||
} catch (IOException expected) {
|
} catch (IOException expected) {
|
||||||
/** Expected */
|
/** Expected */
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
DFSUtil.getBackupNodeAddresses(conf);
|
Map<String, Map<String, InetSocketAddress>> map =
|
||||||
fail("Expected IOException is not thrown");
|
DFSUtil.getBackupNodeAddresses(conf);
|
||||||
|
fail("Expected IOException is not thrown, result was: " +
|
||||||
|
DFSUtil.addressMapToString(map));
|
||||||
} catch (IOException expected) {
|
} catch (IOException expected) {
|
||||||
/** Expected */
|
/** Expected */
|
||||||
}
|
}
|
||||||
|
|
||||||
try {
|
try {
|
||||||
DFSUtil.getSecondaryNameNodeAddresses(conf);
|
Map<String, Map<String, InetSocketAddress>> map =
|
||||||
fail("Expected IOException is not thrown");
|
DFSUtil.getSecondaryNameNodeAddresses(conf);
|
||||||
|
fail("Expected IOException is not thrown, result was: " +
|
||||||
|
DFSUtil.addressMapToString(map));
|
||||||
} catch (IOException expected) {
|
} catch (IOException expected) {
|
||||||
/** Expected */
|
/** Expected */
|
||||||
}
|
}
|
||||||
@@ -310,5 +327,44 @@ public void testGetServerInfo() {
     String httpport = DFSUtil.getInfoServer(null, conf, false);
     assertEquals("0.0.0.0:50070", httpport);
   }
 
+  @Test
+  public void testHANameNodesWithFederation() {
+    HdfsConfiguration conf = new HdfsConfiguration();
+
+    final String NS1_NN1_HOST = "ns1-nn1.example.com:8020";
+    final String NS1_NN2_HOST = "ns1-nn2.example.com:8020";
+    final String NS2_NN1_HOST = "ns2-nn1.example.com:8020";
+    final String NS2_NN2_HOST = "ns2-nn2.example.com:8020";
+
+    // Two nameservices, each with two NNs.
+    conf.set(DFS_FEDERATION_NAMESERVICES, "ns1,ns2");
+    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY, "ns1"),
+        "ns1-nn1,ns1-nn2");
+    conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY, "ns2"),
+        "ns2-nn1,ns2-nn2");
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn1"),
+        NS1_NN1_HOST);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns1", "ns1-nn2"),
+        NS1_NN2_HOST);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"),
+        NS2_NN1_HOST);
+    conf.set(DFSUtil.addKeySuffixes(
+        DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
+        NS2_NN2_HOST);
+
+    Map<String, Map<String, InetSocketAddress>> map =
+        DFSUtil.getHaNnRpcAddresses(conf);
+    System.err.println("TestHANameNodesWithFederation:\n" +
+        DFSUtil.addressMapToString(map));
+
+    assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
+    assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
+    assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
+    assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
+  }
+
 }
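A rough standalone sketch of what the new test above sets up, with the scoped keys spelled out literally instead of going through the DFSConfigKeys constants and DFSUtil.addKeySuffixes. It assumes addKeySuffixes joins the key and its suffixes with dots and that the constants resolve to "dfs.federation.nameservices", "dfs.ha.namenodes" and "dfs.namenode.rpc-address"; the class name is made up for illustration.

import java.net.InetSocketAddress;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

// Hedged illustration only, not part of the patch.
public class ScopedHaKeysExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Two federated nameservices, as in the test above, but with the
    // configuration keys written out literally.
    conf.set("dfs.federation.nameservices", "ns1,ns2");
    // The key this change scopes by nameservice: one
    // dfs.ha.namenodes.<nameservice> entry per nameservice.
    conf.set("dfs.ha.namenodes.ns1", "ns1-nn1,ns1-nn2");
    conf.set("dfs.ha.namenodes.ns2", "ns2-nn1,ns2-nn2");
    // RPC addresses are scoped by nameservice and namenode ID.
    conf.set("dfs.namenode.rpc-address.ns1.ns1-nn1", "ns1-nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.ns1.ns1-nn2", "ns1-nn2.example.com:8020");
    conf.set("dfs.namenode.rpc-address.ns2.ns2-nn1", "ns2-nn1.example.com:8020");
    conf.set("dfs.namenode.rpc-address.ns2.ns2-nn2", "ns2-nn2.example.com:8020");

    // Nameservice ID -> namenode ID -> RPC address.
    Map<String, Map<String, InetSocketAddress>> map =
        DFSUtil.getHaNnRpcAddresses(conf);
    System.out.println(DFSUtil.addressMapToString(map));
  }
}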
@@ -22,6 +22,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.TimeoutException;
 
@@ -330,8 +331,8 @@ private void runBalancer(Configuration conf,
     waitForHeartBeat(totalUsedSpace, totalCapacity);
 
     // start rebalancing
-    final List<InetSocketAddress> namenodes =new ArrayList<InetSocketAddress>();
-    namenodes.add(NameNode.getServiceAddress(conf, true));
+    Map<String, Map<String, InetSocketAddress>> namenodes =
+      DFSUtil.getNNServiceRpcAddresses(conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, conf);
     assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
@@ -21,6 +21,7 @@
 import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 import java.util.Random;
 
 import org.apache.commons.logging.Log;
@@ -157,7 +158,8 @@ static void runBalancer(Suite s,
     LOG.info("BALANCER 1");
 
     // start rebalancing
-    final List<InetSocketAddress> namenodes = DFSUtil.getNNServiceRpcAddresses(s.conf);
+    final Map<String, Map<String, InetSocketAddress>> namenodes =
+        DFSUtil.getNNServiceRpcAddresses(s.conf);
     final int r = Balancer.run(namenodes, Balancer.Parameters.DEFALUT, s.conf);
     Assert.assertEquals(Balancer.ReturnStatus.SUCCESS.code, r);
 
@@ -24,6 +24,7 @@
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
+import java.util.Map;
 import java.util.StringTokenizer;
 
 import static org.junit.Assert.*;
@@ -32,6 +33,7 @@
 import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.tools.GetConf;
@@ -80,13 +82,13 @@ private String[] setupAddress(HdfsConfiguration conf, String key,
   }
 
   /*
-   * Convert list of InetSocketAddress to string array with each address
-   * represented as "host:port"
+   * Convert the map returned from DFSUtil functions to an array of
+   * addresses represented as "host:port"
    */
-  private String[] toStringArray(List<InetSocketAddress> list) {
+  private String[] toStringArray(List<ConfiguredNNAddress> list) {
     String[] ret = new String[list.size()];
     for (int i = 0; i < list.size(); i++) {
-      ret[i] = NameNode.getHostPortString(list.get(i));
+      ret[i] = NameNode.getHostPortString(list.get(i).getAddress());
     }
     return ret;
   }
@@ -94,8 +96,8 @@ private String[] toStringArray(List<InetSocketAddress> list) {
   /**
   * Using DFSUtil methods get the list of given {@code type} of address
   */
-  private List<InetSocketAddress> getAddressListFromConf(TestType type,
-      HdfsConfiguration conf) throws IOException {
+  private Map<String, Map<String, InetSocketAddress>> getAddressListFromConf(
+      TestType type, HdfsConfiguration conf) throws IOException {
     switch (type) {
     case NAMENODE:
       return DFSUtil.getNNServiceRpcAddresses(conf);
@@ -161,7 +163,7 @@ private String getAddressListFromTool(TestType type, HdfsConfiguration conf,
   * @param expected, expected addresses
   */
  private void getAddressListFromTool(TestType type, HdfsConfiguration conf,
-      boolean checkPort, List<InetSocketAddress> expected) throws Exception {
+      boolean checkPort, List<ConfiguredNNAddress> expected) throws Exception {
     String out = getAddressListFromTool(type, conf, expected.size() != 0);
     List<String> values = new ArrayList<String>();
 
@@ -176,7 +178,8 @@ private void getAddressListFromTool(TestType type, HdfsConfiguration conf,
     // Convert expected list to String[] of hosts
     int i = 0;
     String[] expectedHosts = new String[expected.size()];
-    for (InetSocketAddress addr : expected) {
+    for (ConfiguredNNAddress cnn : expected) {
+      InetSocketAddress addr = cnn.getAddress();
       if (!checkPort) {
         expectedHosts[i++] = addr.getHostName();
       }else {
@@ -191,7 +194,9 @@ private void getAddressListFromTool(TestType type, HdfsConfiguration conf,
   private void verifyAddresses(HdfsConfiguration conf, TestType type,
       boolean checkPort, String... expected) throws Exception {
     // Ensure DFSUtil returned the right set of addresses
-    List<InetSocketAddress> list = getAddressListFromConf(type, conf);
+    Map<String, Map<String, InetSocketAddress>> map =
+        getAddressListFromConf(type, conf);
+    List<ConfiguredNNAddress> list = DFSUtil.flattenAddressMap(map);
     String[] actual = toStringArray(list);
     Arrays.sort(actual);
     Arrays.sort(expected);
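For orientation, a rough standalone sketch of the consumption pattern the TestGetConf changes exercise: fetch the nested nameservice -> namenode-ID -> address map, then flatten it when only the addresses matter. Only the DFSUtil and NameNode calls visible in the hunks above are assumed; the class name and the single default-filesystem setting are illustrative (without at least one configured namenode, getNNServiceRpcAddresses throws an IOException, as the testEmptyConf hunk checks).

import java.net.InetSocketAddress;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

// Hedged illustration only, not part of the patch.
public class FlattenAddressMapExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Illustrative non-federated, non-HA setup: a single default namenode.
    conf.set("fs.defaultFS", "hdfs://localhost:8020");

    // Nameservice ID -> namenode ID -> RPC address (null keys for the
    // default, unscoped case).
    Map<String, Map<String, InetSocketAddress>> map =
        DFSUtil.getNNServiceRpcAddresses(conf);

    // Flatten when callers only need the addresses themselves.
    List<ConfiguredNNAddress> flat = DFSUtil.flattenAddressMap(map);
    for (ConfiguredNNAddress cnn : flat) {
      System.out.println(NameNode.getHostPortString(cnn.getAddress()));
    }
  }
}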