Compare commits
11 Commits
trunk...HADOOP-178
Author | SHA1 | Date
---|---|---
Renukaprasad C | f5e9b6861a |
Renukaprasad C | c0f0b33e40 |
Brahma Reddy Battula | 7118db5ee3 |
Brahma Reddy Battula | c0c70e0833 |
Brahma Reddy Battula | 3133386ac4 |
Brahma Reddy Battula | 2c9b22f15c |
Brahma Reddy Battula | eaad653180 |
Brahma Reddy Battula | b30674140b |
Brahma Reddy Battula | 809cca765f |
Brahma Reddy Battula | ddecfe1524 |
Brahma Reddy Battula | f293a2ff71 |
@@ -619,7 +619,12 @@ function hadoop_bootstrap
   export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}

   # defaults
-  export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+  # shellcheck disable=SC2154
+  if [[ "${HADOOP_ALLOW_IPV6}" -ne "yes" ]]; then
+    export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+  else
+    export HADOOP_OPTS=${HADOOP_OPTS:-""}
+  fi
   hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
 }
@@ -85,8 +85,7 @@
 # Kerberos security.
 # export HADOOP_JAAS_DEBUG=true

-# Extra Java runtime options for all Hadoop commands. We don't support
-# IPv6 yet/still, so by default the preference is set to IPv4.
+# Extra Java runtime options for all Hadoop commands.
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
 # For Kerberos debugging, an extended option set logs more information
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
@@ -2562,7 +2562,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       return updateConnectAddr(addressProperty, addr);
     }

-    final String connectHost = connectHostPort.split(":")[0];
+    final String connectHost = NetUtils.getHostFromHostPort(connectHostPort);
     // Create connect address using client address hostname and server port.
     return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
         connectHost, addr.getPort()));
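The change above replaces a naive `split(":")[0]` with an IPv6-aware host:port parse. A minimal sketch of the difference, using plain Guava `HostAndPort` rather than Hadoop's shaded `org.apache.hadoop.thirdparty` copy (values are illustrative):

```java
import com.google.common.net.HostAndPort;

public class HostPortDemo {
  public static void main(String[] args) {
    String v4 = "192.168.1.10:8020";
    String v6 = "[2001:db8::1]:8020";

    // Naive split: fine for IPv4, silently wrong for an IPv6 literal.
    System.out.println(v4.split(":")[0]); // 192.168.1.10
    System.out.println(v6.split(":")[0]); // [2001  -- truncated address

    // HostAndPort understands bracketed IPv6 literals.
    System.out.println(HostAndPort.fromString(v6).getHost()); // 2001:db8::1
    System.out.println(HostAndPort.fromString(v6).getPort()); // 8020
  }
}
```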
@@ -82,6 +82,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;

 import static org.apache.hadoop.util.KMSUtil.checkNotEmpty;
 import static org.apache.hadoop.util.KMSUtil.checkNotNull;

@@ -290,16 +291,20 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
     // In the current scheme, all hosts have to run on the same port
     int port = -1;
     String hostsPart = authority;
-    if (authority.contains(":")) {
-      String[] t = authority.split(":");
-      try {
-        port = Integer.parseInt(t[1]);
-      } catch (Exception e) {
+
+    try {
+      HostAndPort hp = HostAndPort.fromString(hostsPart);
+      if (hp.hasPort()) {
+        port = hp.getPort();
+        hostsPart = hp.getHost();
+      }
+    } catch (IllegalArgumentException e) {
       throw new IOException(
           "Could not parse port in kms uri [" + origUrl + "]");
     }
-      hostsPart = t[0];
-    }
+
     KMSClientProvider[] providers =
         createProviders(conf, origUrl, port, hostsPart);
     return new LoadBalancingKMSClientProvider(providerUri, providers, conf);
@@ -199,8 +199,16 @@ public class Path
     int start = 0;

     // parse uri scheme, if any
-    int colon = pathString.indexOf(':');
+    int colon = -1;
     int slash = pathString.indexOf('/');
+    if (StringUtils.countMatches(pathString, ":") > 2) {
+      //In case of IPv6 address, we should be able to parse the scheme
+      // correctly (This will ensure to parse path with & without scheme
+      // correctly in IPv6).
+      colon = pathString.indexOf(":/");
+    } else {
+      colon = pathString.indexOf(':');
+    }
     if ((colon != -1) &&
         ((slash == -1) || (colon < slash))) { // has a scheme
       scheme = pathString.substring(0, colon);
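The `Path` change counts colons to decide how a scheme boundary can be found once IPv6 literals may appear in the authority. A standalone sketch of the same heuristic (hypothetical `parseScheme` helper; commons-lang3 `StringUtils` as in the patch):

```java
import org.apache.commons.lang3.StringUtils;

public class PathSchemeDemo {
  static String parseScheme(String pathString) {
    int colon;
    if (StringUtils.countMatches(pathString, ":") > 2) {
      // More than two colons implies an IPv6 literal is present, so only
      // a ":/" sequence can mark the end of a scheme.
      colon = pathString.indexOf(":/");
    } else {
      colon = pathString.indexOf(':');
    }
    int slash = pathString.indexOf('/');
    return (colon != -1 && (slash == -1 || colon < slash))
        ? pathString.substring(0, colon) : null;
  }

  public static void main(String[] args) {
    System.out.println(parseScheme("hdfs://[2001:db8::1]:8020/user")); // hdfs
    System.out.println(parseScheme("/local/path"));                    // null
  }
}
```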
@@ -499,10 +499,11 @@ public class Client implements AutoCloseable {
       boolean trySasl = UserGroupInformation.isSecurityEnabled() ||
           (ticket != null && !ticket.getTokens().isEmpty());
       this.authProtocol = trySasl ? AuthProtocol.SASL : AuthProtocol.NONE;

-      this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " +
-          server.toString() +
-          " from " + ((ticket==null)?"an unknown user":ticket.getUserName()));
+      this.setName(
+          "IPC Client (" + socketFactory.hashCode() + ") connection to "
+              + NetUtils.getSocketAddressString(server) + " from " + ((ticket
+              == null) ? "an unknown user" : ticket.getUserName()));
       this.setDaemon(true);
     }

@@ -636,8 +637,9 @@ public class Client implements AutoCloseable {
           server.getHostName(), server.getPort());

       if (!server.equals(currentAddr)) {
-        LOG.warn("Address change detected. Old: " + server.toString() +
-            " New: " + currentAddr.toString());
+        LOG.warn("Address change detected. Old: " + NetUtils
+            .getSocketAddressString(server) + " New: " + NetUtils
+            .getSocketAddressString(currentAddr));
         server = currentAddr;
         UserGroupInformation ticket = remoteId.getTicket();
         this.setName("IPC Client (" + socketFactory.hashCode()

@@ -1835,7 +1837,7 @@ public class Client implements AutoCloseable {

     @Override
     public String toString() {
-      return address.toString();
+      return NetUtils.getSocketAddressString(address);
     }
   }
@@ -26,6 +26,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.net.InetAddress;
+import java.net.Inet6Address;
 import java.net.NetworkInterface;
 import java.net.SocketException;
 import java.net.UnknownHostException;

@@ -74,22 +75,23 @@ public class DNS {
    * @throws NamingException If a NamingException is encountered
    */
   public static String reverseDns(InetAddress hostIp, @Nullable String ns)
-    throws NamingException {
-    //
-    // Builds the reverse IP lookup form
-    // This is formed by reversing the IP numbers and appending in-addr.arpa
-    //
-    String[] parts = hostIp.getHostAddress().split("\\.");
-    String reverseIP = parts[3] + "." + parts[2] + "." + parts[1] + "."
-        + parts[0] + ".in-addr.arpa";
+      throws NamingException {
+    String dnsQueryAddress;
+    if (hostIp instanceof Inet6Address) {
+      dnsQueryAddress = getIPv6DnsAddr((Inet6Address) hostIp, ns);
+    } else {
+      dnsQueryAddress = getIPv4DnsAddr(hostIp, ns);
+    }
+    LOG.info("Querying using DNS address: " + dnsQueryAddress);

     DirContext ictx = new InitialDirContext();
     Attributes attribute;
     try {
-      attribute = ictx.getAttributes("dns://" // Use "dns:///" if the default
-          + ((ns == null) ? "" : ns) +
-          // nameserver is to be used
-          "/" + reverseIP, new String[] { "PTR" });
+      // Use "dns:///" if the default
+      // nameserver is to be used
+      attribute = ictx.getAttributes(
+          "dns://" + ((ns == null) ? "" : ns) + "/" + dnsQueryAddress,
+          new String[] {"PTR"});
     } finally {
       ictx.close();
     }

@@ -102,18 +104,53 @@ public class DNS {
     return hostname;
   }

+  private static String getIPv4DnsAddr(InetAddress hostIp, @Nullable String ns)
+      throws NamingException {
+    String ipString = hostIp.getHostAddress();
+    LOG.info("Doing reverse DNS lookup for IPv4 address: " + ipString);
+    String[] parts = ipString.split("\\.");
+    if (parts.length != 4) {
+      throw new NamingException("Invalid IPv4 address " + ipString);
+    }
+
+    return parts[3] + "." + parts[2] + "." + parts[1] + "." + parts[0]
+        + ".in-addr.arpa";
+  }
+
+  @VisibleForTesting
+  public static String getIPv6DnsAddr(Inet6Address hostIp, @Nullable String ns)
+      throws NamingException {
+    LOG.info("Doing reverse DNS lookup for IPv6 address: " +
+        hostIp.getHostAddress());
+
+    // bytes need to be converted to hex string and reversed to get IPv6
+    // reverse resolution address
+    byte[] bytes = hostIp.getAddress();
+    StringBuilder sb = new StringBuilder();
+    for(int pos = bytes.length - 1; pos >= 0; pos--) {
+      byte b = bytes[pos];
+      String hexStr = String.format("%02x", b);
+      sb.append(hexStr.charAt(1));
+      sb.append(".");
+      sb.append(hexStr.charAt(0));
+      sb.append(".");
+    }
+    sb.append("ip6.arpa");
+    return sb.toString();
+  }
+
   /**
    * @return NetworkInterface for the given subinterface name (eg eth0:0)
    * or null if no interface with the given name can be found
    */
   private static NetworkInterface getSubinterface(String strInterface)
       throws SocketException {
-    Enumeration<NetworkInterface> nifs =
-      NetworkInterface.getNetworkInterfaces();
+    Enumeration<NetworkInterface> nifs =
+        NetworkInterface.getNetworkInterfaces();

     while (nifs.hasMoreElements()) {
-      Enumeration<NetworkInterface> subNifs =
-        nifs.nextElement().getSubInterfaces();
+      Enumeration<NetworkInterface> subNifs =
+          nifs.nextElement().getSubInterfaces();

       while (subNifs.hasMoreElements()) {
         NetworkInterface nif = subNifs.nextElement();
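The new `getIPv6DnsAddr` builds the `ip6.arpa` reverse-lookup name (RFC 3596) by emitting the address nibbles in reverse order, low nibble before high nibble. A self-contained sketch of the same technique (hypothetical class name):

```java
import java.net.Inet6Address;
import java.net.InetAddress;

public class ReverseDnsDemo {
  // Same nibble-reversal technique as the patch: low nibble first,
  // then high nibble, walking the 16 address bytes back to front.
  static String ip6Arpa(Inet6Address addr) {
    byte[] bytes = addr.getAddress();
    StringBuilder sb = new StringBuilder();
    for (int pos = bytes.length - 1; pos >= 0; pos--) {
      String hex = String.format("%02x", bytes[pos]);
      sb.append(hex.charAt(1)).append('.').append(hex.charAt(0)).append('.');
    }
    return sb.append("ip6.arpa").toString();
  }

  public static void main(String[] args) throws Exception {
    Inet6Address loopback = (Inet6Address) InetAddress.getByName("::1");
    // 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa
    System.out.println(ip6Arpa(loopback));
  }
}
```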
@@ -40,12 +40,12 @@ import java.nio.channels.SocketChannel;
 import java.nio.channels.UnresolvedAddressException;
-import java.util.Map.Entry;
-import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
+import java.util.*;
+import java.util.concurrent.ConcurrentHashMap;

 import javax.net.SocketFactory;

+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.thirdparty.com.google.common.cache.Cache;
 import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;
@@ -61,6 +61,11 @@ import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.ReflectionUtils;

+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;
+import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
+import org.apache.http.conn.util.InetAddressUtils;
+import java.net.*;
+
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -70,7 +75,7 @@ import org.slf4j.LoggerFactory;
 public class NetUtils {
   private static final Logger LOG = LoggerFactory.getLogger(NetUtils.class);

-  private static Map<String, String> hostToResolved = 
+  private static Map<String, String> hostToResolved =
       new HashMap<String, String>();
   /** text to point users elsewhere: {@value} */
   private static final String FOR_MORE_DETAILS_SEE
@@ -219,6 +224,22 @@ public class NetUtils {
     }
     target = target.trim();
     boolean hasScheme = target.contains("://");
+    if (StringUtils.countMatches(target, ":") > 2) {
+      // if scheme exists in the target
+      // for example : https://ffff:ffff:ffff:ffff::1:XXXXX
+      // we have to form https://[ffff:ffff:ffff:ffff::1]:XXXXX
+      if (hasScheme) {
+        int i = target.lastIndexOf("/");
+        String scheme = target.substring(0, i + 1);
+        String ipAddrWithPort = target.substring(i + 1);
+        target = scheme + normalizeV6Address(ipAddrWithPort);
+      } else {
+        // if scheme does not exists in the target
+        // for example : ffff:ffff:ffff:ffff::1:XXXXX
+        // we have to form [ffff:ffff:ffff:ffff::1]:XXXXX
+        target = normalizeV6Address(target);
+      }
+    }
     URI uri = createURI(target, hasScheme, helpText, useCacheIfPresent);

     String host = uri.getHost();
@@ -271,6 +292,24 @@ public class NetUtils {
     return uri;
   }

+  public static String normalizeV6Address(String target) {
+    if (!target.startsWith("[")) {
+      if (target.contains("%")) {
+        int i = target.lastIndexOf('%');
+        target = target.trim();
+        String port = target.substring(target.lastIndexOf(":") + 1);
+        String addr = target.substring(0, i);
+        target = "[" + addr + "]" + ":" + port;
+      } else {
+        int i = target.lastIndexOf(':');
+        String port = target.substring(target.lastIndexOf(":") + 1);
+        String addr = target.substring(0, i);
+        target = "[" + addr + "]" + ":" + port;
+      }
+    }
+    return target;
+  }
+
   /**
    * Create a socket address with the given host and port. The hostname
    * might be replaced with another host that was set via
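`normalizeV6Address` wraps an unbracketed IPv6 literal in square brackets so the trailing `:port` stays unambiguous, dropping any `%zone` suffix along the way. A condensed sketch of that contract (a re-implementation for illustration, not the patch verbatim; input assumed to be `addr:port`):

```java
public class NormalizeDemo {
  static String normalizeV6Address(String target) {
    if (!target.startsWith("[")) {
      int cut = target.contains("%")
          ? target.lastIndexOf('%')      // drop a zone id such as %eth0
          : target.lastIndexOf(':');     // last colon separates the port
      String port = target.substring(target.lastIndexOf(':') + 1);
      String addr = target.substring(0, cut);
      target = "[" + addr + "]:" + port;
    }
    return target;
  }

  public static void main(String[] args) {
    System.out.println(normalizeV6Address("ffff::1:8020"));        // [ffff::1]:8020
    System.out.println(normalizeV6Address("fe80::1%eth0:8020"));   // [fe80::1]:8020
    System.out.println(normalizeV6Address("[ffff::1]:8020"));      // unchanged
  }
}
```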
@@ -669,9 +708,6 @@ public class NetUtils {
     }
   }

-  private static final Pattern ipPortPattern = // Pattern for matching ip[:port]
-      Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d+)?");
-
   /**
    * Attempt to obtain the host name of the given string which contains
    * an IP address and an optional port.
@@ -680,16 +716,26 @@ public class NetUtils {
    * @return Host name or null if the name can not be determined
    */
   public static String getHostNameOfIP(String ipPort) {
-    if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
+    String ip = null;
+    if (null == ipPort || ipPort.isEmpty()) {
       return null;
     }

     try {
-      int colonIdx = ipPort.indexOf(':');
-      String ip = (-1 == colonIdx) ? ipPort
-          : ipPort.substring(0, ipPort.indexOf(':'));
+      HostAndPort hostAndPort = HostAndPort.fromString(ipPort);
+      ip = hostAndPort.getHost();
+      if (!InetAddresses.isInetAddress(ip)) {
+        return null;
+      }
+    } catch (IllegalArgumentException e) {
+      LOG.debug("getHostNameOfIP: '" + ipPort
+          + "' is not a valid IP address or IP/Port pair.", e);
+      return null;
+    }
+
+    try {
       return InetAddress.getByName(ip).getHostName();
     } catch (UnknownHostException e) {
       LOG.trace("getHostNameOfIP: '"+ipPort+"' name not resolved.", e);
       return null;
     }
   }
@@ -702,8 +748,20 @@ public class NetUtils {
    * @return host:port
    */
   public static String normalizeIP2HostName(String ipPort) {
-    if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
-      return ipPort;
+    String ip = null;
+    if (null == ipPort || ipPort.isEmpty()) {
+      return null;
+    }
+    try {
+      HostAndPort hostAndPort = HostAndPort.fromString(ipPort);
+      ip = hostAndPort.getHost();
+      if (!InetAddresses.isInetAddress(ip)) {
+        return null;
+      }
+    } catch (IllegalArgumentException e) {
+      LOG.debug("getHostNameOfIP: '" + ipPort
+          + "' is not a valid IP address or IP/Port pair.", e);
+      return null;
     }

     InetSocketAddress address = createSocketAddr(ipPort);
@@ -735,11 +793,88 @@ public class NetUtils {

   /**
    * Compose a "host:port" string from the address.
+   *
+   * Note that this preferentially returns the host name if available; if the
+   * IP address is desired, use getIPPortString(); if both are desired as in
+   * InetSocketAddress.toString, use getSocketAddressString()
    */
   public static String getHostPortString(InetSocketAddress addr) {
-    return addr.getHostName() + ":" + addr.getPort();
+    String hostName = addr.getHostName();
+    if (InetAddressUtils.isIPv6Address(hostName)) {
+      return "[" + hostName + "]:" + addr.getPort();
+    }
+    return hostName.toLowerCase() + ":" + addr.getPort();
+  }
+
+  /**
+   * Compose a "ip:port" string from the InetSocketAddress.
+   *
+   * Note that this may result in an NPE if passed an unresolved
+   * InetSocketAddress.
+   */
+  public static String getIPPortString(InetSocketAddress addr) {
+    final InetAddress ip = addr.getAddress();
+    // this is a judgement call, and we might arguably just guard against NPE
+    // by treating null as "" ; I think this is going to hide more bugs than it
+    // prevents
+    if (ip == null) {
+      throw new IllegalArgumentException(
+          "getIPPortString called with unresolved InetSocketAddress : "
+              + getSocketAddressString(addr));
+    }
+    String ipString = ip.getHostAddress();
+    if (ip instanceof Inet6Address) {
+      return "[" + ipString + "]:" + addr.getPort();
+    }
+    return ipString + ":" + addr.getPort();
+  }
+
+  public static String getIPPortString(String ipAddr, int port) {
+    String s;
+    if (ipAddr != null) {
+      s = ipAddr + ":" + port;
+    } else {
+      s = ":" + port;
+    }
+    //Blank eventually will get to treated as localhost if this gets down to
+    // InetAddress. Tests extensively use a blank address, and we don't want
+    // to change behavior here.
+    if (ipAddr != null && !ipAddr.isEmpty() && InetAddressUtils
+        .isIPv6Address(ipAddr)) {
+      try {
+        InetAddress addr = InetAddress.getByName(ipAddr);
+        String cleanAddr = addr.getHostAddress();
+        if (addr instanceof Inet6Address) {
+          s = '[' + cleanAddr + ']' + ":" + port;
+        }
+      } catch (UnknownHostException e) {
+        // ignore anything that isn't an IPv6 literal and keep the old
+        // behavior. could add debug log here, but this should only happen
+        // if there's a bug in InetAddressUtils.isIPv6Address which accepts
+        // something that isn't an IPv6 literal.
+      }
+    }
+    return s;
+  }
+
+  /**
+   * An IPv6-safe version of InetSocketAddress.toString().
+   * Note that this will typically be of the form hostname/IP:port and is NOT
+   * a substitute for getHostPortString or getIPPortString.
+   */
+  public static String getSocketAddressString(InetSocketAddress addr) {
+    if (addr.isUnresolved()) {
+      return addr.toString();
+    }
+    InetAddress ip = addr.getAddress();
+    if (ip instanceof Inet6Address) {
+      String hostName = addr.getHostName();
+      return ((hostName != null) ? hostName : "")
+          + "/[" + ip.getHostAddress() + "]:" + addr.getPort();
+    } else {
+      return addr.toString();
+    }
+  }

   /**
    * Checks if {@code host} is a local host name and return {@link InetAddress}
    * corresponding to that address.
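The three helpers serve different callers; a hedged sketch of the intended outputs (assuming `org.apache.hadoop.net.NetUtils` from this branch and a resolvable loopback):

```java
import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class AddressStringDemo {
  public static void main(String[] args) {
    InetSocketAddress v6 = new InetSocketAddress("::1", 8020);

    // Name-oriented: "localhost:8020" if the loopback reverse-resolves,
    // otherwise the literal in brackets, e.g. "[0:0:0:0:0:0:0:1]:8020".
    System.out.println(NetUtils.getHostPortString(v6));

    // Literal-oriented: "[0:0:0:0:0:0:0:1]:8020" for this address;
    // throws IllegalArgumentException for an unresolved InetSocketAddress.
    System.out.println(NetUtils.getIPPortString(v6));

    // toString()-style "name/[literal]:port", safe to log for IPv6 peers.
    System.out.println(NetUtils.getSocketAddressString(v6));
  }
}
```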
@@ -1036,6 +1171,38 @@ public class NetUtils {
     return port;
   }

+  /**
+   * Wrapper method on HostAndPort; returns the port from a host:port
+   * or IP:port pair.
+   *
+   * It's probably best to create your own HostAndPort.fromString(hp) and
+   * do a .getPort and .getHostText if you need both host and port in one
+   * scope.
+   */
+  public static int getPortFromHostPort(String hp) {
+    return HostAndPort.fromString(hp).getPort();
+  }
+
+  /**
+   * Wrapper method on HostAndPort; returns the host from a host:port
+   * or IP:port pair.
+   *
+   * It's probably best to create your own HostAndPort.fromString(hp) and
+   * do a .getPort and .getHostText if you need both host and port in one
+   * scope.
+   */
+  public static String getHostFromHostPort(String hp) {
+    return HostAndPort.fromString(hp).getHost();
+  }
+
+  public static InetAddress getInetAddressFromInetSocketAddressString(
+      String remoteAddr) {
+    int slashIdx = remoteAddr.indexOf('/') + 1;
+    int colonIdx = remoteAddr.lastIndexOf(':');
+    String ipOnly = remoteAddr.substring(slashIdx, colonIdx);
+    return InetAddresses.forString(ipOnly);
+  }
+
   /**
    * Return an @{@link InetAddress} to bind to. If bindWildCardAddress is true
    * than returns null.
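Usage sketch for the `HostAndPort` wrappers and the `InetSocketAddress`-string parser (example values only; the `host/ip:port` form mirrors `InetSocketAddress.toString()`):

```java
import org.apache.hadoop.net.NetUtils;

public class HostPortWrapperDemo {
  public static void main(String[] args) {
    System.out.println(NetUtils.getHostFromHostPort("nn1.example.com:8020")); // nn1.example.com
    System.out.println(NetUtils.getPortFromHostPort("[2001:db8::1]:8020"));   // 8020

    // Parses the "host/ip:port" shape produced by InetSocketAddress.toString().
    System.out.println(NetUtils.getInetAddressFromInetSocketAddressString(
        "server1/127.0.0.1:8020")); // /127.0.0.1
  }
}
```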
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;

 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SOCKS_SERVER_KEY;

@@ -148,13 +149,16 @@ public class SocksSocketFactory extends SocketFactory implements
    * @param proxyStr the proxy address using the format "host:port"
    */
   private void setProxy(String proxyStr) {
-    String[] strs = proxyStr.split(":", 2);
-    if (strs.length != 2)
+    try {
+      HostAndPort hp = HostAndPort.fromString(proxyStr);
+      if (!hp.hasPort()) {
+        throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
+      }
+      String host = hp.getHost();
+      this.proxy = new Proxy(Proxy.Type.SOCKS,
+          InetSocketAddress.createUnresolved(host, hp.getPort()));
+    } catch (IllegalArgumentException e) {
       throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
-    String host = strs[0];
-    int port = Integer.parseInt(strs[1]);
-    this.proxy =
-        new Proxy(Proxy.Type.SOCKS, InetSocketAddress.createUnresolved(host,
-            port));
+    }
   }
 }
@@ -445,7 +445,7 @@ public final class SecurityUtil {
     if (token != null) {
       token.setService(service);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Acquired token "+token); // Token#toString() prints service
+        LOG.debug("Acquired token " + token); // Token#toString() prints service
       }
     } else {
       LOG.warn("Failed to get token for service "+service);

@@ -459,18 +459,15 @@ public final class SecurityUtil {
    * hadoop.security.token.service.use_ip
    */
   public static Text buildTokenService(InetSocketAddress addr) {
-    String host = null;
     if (useIpForTokenService) {
       if (addr.isUnresolved()) { // host has no ip address
         throw new IllegalArgumentException(
             new UnknownHostException(addr.getHostName())
         );
       }
-      host = addr.getAddress().getHostAddress();
-    } else {
-      host = StringUtils.toLowerCase(addr.getHostName());
+      return new Text(NetUtils.getIPPortString(addr));
     }
-    return new Text(host + ":" + addr.getPort());
+    return new Text(NetUtils.getHostPortString(addr));
   }

   /**
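With this change `buildTokenService` delegates both branches to the `NetUtils` helpers; a sketch of the two service-name forms (outputs depend on local name resolution, so they are illustrative):

```java
import java.net.InetSocketAddress;
import org.apache.hadoop.security.SecurityUtil;

public class TokenServiceDemo {
  public static void main(String[] args) {
    InetSocketAddress addr = new InetSocketAddress("localhost", 123);
    // use_ip=true  -> NetUtils.getIPPortString(addr):  "127.0.0.1:123",
    //                 or "[0:0:0:0:0:0:0:1]:123" on an IPv6-first host
    // use_ip=false -> NetUtils.getHostPortString(addr): "localhost:123"
    System.out.println(SecurityUtil.buildTokenService(addr));
  }
}
```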
@@ -125,10 +125,10 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
           + " is not allowed to impersonate " + user.getUserName());
     }

-    MachineList MachineList = proxyHosts.get(
+    MachineList machineList = proxyHosts.get(
         getProxySuperuserIpConfKey(realUser.getShortUserName()));

-    if(MachineList == null || !MachineList.includes(remoteAddress)) {
+    if(machineList == null || !machineList.includes(remoteAddress)) {
       throw new AuthorizationException("Unauthorized connection for super-user: "
           + realUser.getUserName() + " from IP " + remoteAddress);
     }
@@ -31,6 +31,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;

+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;

@@ -330,56 +331,39 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
     return tmpDir;
   }

-  private static int getPort(String hostPort) {
-    String[] split = hostPort.split(":");
-    String portstr = split[split.length-1];
-    String[] pc = portstr.split("/");
-    if (pc.length > 1) {
-      portstr = pc[0];
-    }
-    return Integer.parseInt(portstr);
-  }
-
-  static ServerCnxnFactory createNewServerInstance(File dataDir,
-      ServerCnxnFactory factory, String hostPort, int maxCnxns)
-      throws IOException, InterruptedException
-  {
-    ZooKeeperServer zks = new ZooKeeperServer(dataDir, dataDir, 3000);
-    final int PORT = getPort(hostPort);
-    if (factory == null) {
-      factory = ServerCnxnFactory.createFactory(PORT, maxCnxns);
-    }
-    factory.startup(zks);
-    Assert.assertTrue("waiting for server up",
-        ClientBaseWithFixes.waitForServerUp("127.0.0.1:" + PORT,
-            CONNECTION_TIMEOUT));
-
-    return factory;
-  }
+  static ServerCnxnFactory createNewServerInstance(File dataDir,
+      ServerCnxnFactory factory, String hostPort, int maxCnxns)
+      throws IOException, InterruptedException {
+    ZooKeeperServer zks = new ZooKeeperServer(dataDir, dataDir, 3000);
+    final int port = NetUtils.getPortFromHostPort(hostPort);
+    if (factory == null) {
+      factory = ServerCnxnFactory.createFactory(port, maxCnxns);
+    }
+    factory.startup(zks);
+    Assert.assertTrue("waiting for server up", ClientBaseWithFixes
+        .waitForServerUp("127.0.0.1:" + port, CONNECTION_TIMEOUT));
+
+    return factory;
+  }

-  static void shutdownServerInstance(ServerCnxnFactory factory,
-      String hostPort)
-  {
-    if (factory != null) {
-      ZKDatabase zkDb;
-      {
-        ZooKeeperServer zs = getServer(factory);
-
-        zkDb = zs.getZKDatabase();
-      }
-      factory.shutdown();
-      try {
-        zkDb.close();
-      } catch (IOException ie) {
-        LOG.warn("Error closing logs ", ie);
-      }
-      final int PORT = getPort(hostPort);
-
-      Assert.assertTrue("waiting for server down",
-          ClientBaseWithFixes.waitForServerDown("127.0.0.1:" + PORT,
-              CONNECTION_TIMEOUT));
-    }
-  }
+  static void shutdownServerInstance(ServerCnxnFactory factory,
+      String hostPort) {
+    if (factory != null) {
+      ZKDatabase zkDb;
+      ZooKeeperServer zs = getServer(factory);
+      zkDb = zs.getZKDatabase();
+      factory.shutdown();
+      try {
+        zkDb.close();
+      } catch (IOException ie) {
+        LOG.warn("Error closing logs ", ie);
+      }
+      final int port = NetUtils.getPortFromHostPort(hostPort);
+
+      Assert.assertTrue("waiting for server down", ClientBaseWithFixes
+          .waitForServerDown("127.0.0.1:" + port, CONNECTION_TIMEOUT));
+    }
+  }

   /**
    * Test specific setup
@@ -24,6 +24,7 @@ import java.net.NetworkInterface;
 import java.net.SocketException;
 import java.net.UnknownHostException;
 import java.net.InetAddress;
+import java.net.Inet6Address;

 import javax.naming.CommunicationException;
 import javax.naming.NameNotFoundException;

@@ -251,4 +252,20 @@ public class TestDNS {
     assertNotNull("localhost is null", localhost);
     LOG.info("Localhost IPAddr is " + localhost.toString());
   }
+
+  /**
+   * Test that dns query address is calculated correctly for ipv6 addresses.
+   */
+  @Test
+  public void testIPv6ReverseDNSAddress() throws Exception {
+    Inet6Address adr = (Inet6Address) InetAddress.getByName("::");
+    assertEquals(
+        "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa",
+        DNS.getIPv6DnsAddr(adr, null));
+
+    adr = (Inet6Address) InetAddress.getByName("fe80::62eb:69ff:fe9b:bade");
+    assertEquals(
+        "e.d.a.b.b.9.e.f.f.f.9.6.b.e.2.6.0.0.0.0.0.0.0.0.0.0.0.0.0.8.e.f.ip6.arpa",
+        DNS.getIPv6DnsAddr(adr, null));
+  }
 }
@@ -57,6 +57,14 @@ public class TestNetUtils {
   private static final String DEST_PORT_NAME = Integer.toString(DEST_PORT);
   private static final int LOCAL_PORT = 8080;
   private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT);
+  private static final String IPV6_LOOPBACK_LONG_STRING = "0:0:0:0:0:0:0:1";
+  private static final String IPV6_SAMPLE_ADDRESS =
+      "2a03:2880:2130:cf05:face:b00c:0:1";
+  private static final String IPV6_LOOPBACK_SHORT_STRING = "::1";
+  private static final String IPV6_LOOPBACK_WITH_PORT =
+      "[" + IPV6_LOOPBACK_LONG_STRING + "]:10";
+  private static final String IPV6_SAMPLE_WITH_PORT =
+      "[" + IPV6_SAMPLE_ADDRESS + "]:10";

   /**
    * Some slop around expected times when making sure timeouts behave
@@ -583,13 +591,19 @@ public class TestNetUtils {
     return addr;
   }

-  private void
-  verifyInetAddress(InetAddress addr, String host, String ip) {
+  private void verifyInetAddress(InetAddress addr, String host, String... ips) {
     assertNotNull(addr);
     assertEquals(host, addr.getHostName());
-    assertEquals(ip, addr.getHostAddress());
+
+    boolean found = false;
+    for (String ip : ips) {
+      found |= ip.equals(addr.getHostAddress());
+    }
+    assertTrue("Expected addr.getHostAddress[" + addr.getHostAddress()
+        + "] to be one of " + StringUtils.join(ips, ","), found);
   }


   @Test
   public void testResolverUnqualified() {
     String host = "host";
@@ -619,12 +633,13 @@ public class TestNetUtils {
   }

   // localhost

   @Test
   public void testResolverLoopback() {
     String host = "Localhost";
     InetAddress addr = verifyResolve(host); // no lookup should occur
-    verifyInetAddress(addr, "Localhost", "127.0.0.1");
+    verifyInetAddress(addr, "Localhost", "127.0.0.1", IPV6_LOOPBACK_LONG_STRING,
+        IPV6_LOOPBACK_SHORT_STRING);
   }

   @Test
@@ -721,19 +736,22 @@ public class TestNetUtils {
     } catch (UnknownHostException e) {
       Assume.assumeTrue("Network not resolving "+ oneHost, false);
     }
-    List<String> hosts = Arrays.asList("127.0.0.1",
-        "localhost", oneHost, "UnknownHost123");
+    List<String> hosts = Arrays.asList(new String[] {"127.0.0.1",
+        "localhost", oneHost, "UnknownHost123.invalid"});
     List<String> normalizedHosts = NetUtils.normalizeHostNames(hosts);
     String summary = "original [" + StringUtils.join(hosts, ", ") + "]"
         + " normalized [" + StringUtils.join(normalizedHosts, ", ") + "]";
     // when ipaddress is normalized, same address is expected in return
     assertEquals(summary, hosts.get(0), normalizedHosts.get(0));
-    // for normalizing a resolvable hostname, resolved ipaddress is expected in return
-    assertFalse("Element 1 equal "+ summary,
-        normalizedHosts.get(1).equals(hosts.get(1)));
-    assertEquals(summary, hosts.get(0), normalizedHosts.get(1));
-    // this address HADOOP-8372: when normalizing a valid resolvable hostname start with numeric,
-    // its ipaddress is expected to return
+    // for normalizing a resolvable hostname, resolved ipaddress is expected
+    assertTrue("Should get the localhost address back",
+        normalizedHosts.get(1).equals(hosts.get(0)) || normalizedHosts.get(1)
+            .equals(IPV6_LOOPBACK_LONG_STRING));
+    // this address HADOOP-8372: when normalizing a valid resolvable hostname
+    // start with numeric, its ipaddress is expected to return
     assertFalse("Element 2 equal " + summary,
         normalizedHosts.get(2).equals(hosts.get(2)));
     // return the same hostname after normalizing a irresolvable hostname.
@@ -745,11 +763,22 @@ public class TestNetUtils {
     assertNull(NetUtils.getHostNameOfIP(null));
     assertNull(NetUtils.getHostNameOfIP(""));
     assertNull(NetUtils.getHostNameOfIP("crazytown"));
-    assertNull(NetUtils.getHostNameOfIP("127.0.0.1:"));   // no port
     assertNull(NetUtils.getHostNameOfIP("127.0.0.1:-1")); // bogus port
     assertNull(NetUtils.getHostNameOfIP("127.0.0.1:A"));  // bogus port
+    assertNotNull(NetUtils.getHostNameOfIP("[::1]"));
+    assertNotNull(NetUtils.getHostNameOfIP("[::1]:1"));
     assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1"));
     assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1"));
+    assertEquals("localhost", NetUtils.getHostNameOfIP("127.0.0.1:"));
+  }
+
+  @Test
+  public void testGetHostNameOfIPworksWithIPv6() {
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_LOOPBACK_LONG_STRING));
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_LOOPBACK_SHORT_STRING));
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_SAMPLE_ADDRESS));
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_SAMPLE_WITH_PORT));
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_LOOPBACK_WITH_PORT));
   }

   @Test
@@ -763,6 +792,18 @@ public class TestNetUtils {
     assertEquals(defaultAddr.trim(), NetUtils.getHostPortString(addr));
   }

+  @Test
+  public void testTrimCreateSocketAddressIPv6() {
+    Configuration conf = new Configuration();
+    NetUtils.addStaticResolution("hostIPv6", IPV6_LOOPBACK_LONG_STRING);
+    final String defaultAddr = "hostIPv6:1 ";
+
+    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
+    conf.setSocketAddr("myAddress", addr);
+    assertTrue("Trim should have been called on ipv6 hostname",
+        defaultAddr.trim().equalsIgnoreCase(NetUtils.getHostPortString(addr)));
+  }
+
   @Test
   public void testBindToLocalAddress() throws Exception {
     assertNotNull(NetUtils
@@ -776,4 +817,41 @@ public class TestNetUtils {
     String gotStr = StringUtils.join(got, ", ");
     assertEquals(expectStr, gotStr);
   }
+
+  @Test
+  public void testCreateSocketAddressWithIPV6() throws Throwable {
+    String ipv6Address = "2a03:2880:2130:cf05:face:b00c:0:1";
+    String ipv6WithPort = ipv6Address + ":12345";
+
+    InetSocketAddress addr = NetUtils.createSocketAddr(ipv6WithPort,
+        1000, "myconfig");
+    assertEquals("[" + ipv6Address + "]", addr.getHostName());
+    assertEquals(12345, addr.getPort());
+
+    String ipv6SampleAddressWithScope = ipv6Address + "%2";
+    ipv6WithPort = ipv6SampleAddressWithScope + ":12345";
+    addr = NetUtils.createSocketAddr(ipv6WithPort, 1000, "myconfig");
+    assertEquals("[" + ipv6Address + "]", addr.getHostName());
+    assertEquals(12345, addr.getPort());
+
+    ipv6Address = "[2a03:2880:2130:cf05:face:b00c:0:1]";
+    ipv6WithPort = ipv6Address + ":12345";
+
+    addr = NetUtils.createSocketAddr(ipv6WithPort, 1000, "myconfig");
+    assertEquals(ipv6Address, addr.getHostName());
+    assertEquals(12345, addr.getPort());
+
+    String ipv6AddressWithScheme =
+        "https://2a03:2880:2130:cf05:face:b00c:0:1:12345";
+    addr = NetUtils.createSocketAddr(ipv6AddressWithScheme, 1000,
+        "myconfig");
+    assertEquals(ipv6Address, addr.getHostName());
+    assertEquals(12345, addr.getPort());
+
+    ipv6AddressWithScheme = "https://[2a03:2880:2130:cf05:face:b00c:0:1]:12345";
+    addr = NetUtils.createSocketAddr(ipv6AddressWithScheme, 1000,
+        "myconfig");
+    assertEquals(ipv6Address, addr.getHostName());
+    assertEquals(12345, addr.getPort());
+  }
 }
@@ -25,7 +25,6 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRpcBase;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;

@@ -361,7 +360,6 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
   public void testProxyWithToken() throws Exception {
     final Configuration conf = new Configuration(masterConf);
     TestTokenSecretManager sm = new TestTokenSecretManager();
-    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
     RPC.setProtocolEngine(conf, TestRpcService.class,
         ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);

@@ -408,7 +406,6 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
   public void testTokenBySuperUser() throws Exception {
     TestTokenSecretManager sm = new TestTokenSecretManager();
     final Configuration newConf = new Configuration(masterConf);
-    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
     // Set RPC engine to protobuf RPC engine
     RPC.setProtocolEngine(newConf, TestRpcService.class,
         ProtobufRpcEngine2.class);
@@ -182,16 +182,12 @@ public class TestSecurityUtil {
     conf.setBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, true);
     SecurityUtil.setConfiguration(conf);
-    assertEquals("127.0.0.1:123",
-        SecurityUtil.buildTokenService(new InetSocketAddress("LocalHost", 123)).toString()
-    );
-    assertEquals("127.0.0.1:123",
-        SecurityUtil.buildTokenService(new InetSocketAddress("127.0.0.1", 123)).toString()
-    );
-    // what goes in, comes out
-    assertEquals("127.0.0.1:123",
-        SecurityUtil.buildTokenService(NetUtils.createSocketAddr("127.0.0.1", 123)).toString()
-    );
+    assertOneOf(SecurityUtil
+        .buildTokenService(NetUtils.createSocketAddrForHost("LocalHost", 123))
+        .toString(), "127.0.0.1:123", "[0:0:0:0:0:0:0:1]:123");
+    assertOneOf(SecurityUtil
+        .buildTokenService(NetUtils.createSocketAddrForHost("127.0.0.1", 123))
+        .toString(), "127.0.0.1:123", "[0:0:0:0:0:0:0:1]:123");
   }

   @Test

@@ -496,4 +492,13 @@ public class TestSecurityUtil {
         ZK_AUTH_VALUE.toCharArray());
     provider.flush();
   }
+
+  private void assertOneOf(String value, String... expected) {
+    boolean found = false;
+    for (String ip : expected) {
+      found |= ip.equals(value);
+    }
+    assertTrue("Expected value [" + value + "] to be one of " + StringUtils
+        .join(",", expected), found);
+  }
 }
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -876,6 +877,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    */
   public static String getFileName(final InetSocketAddress s,
       final String poolId, final long blockId) {
-    return s.toString() + ":" + poolId + ":" + blockId;
+    return NetUtils.getSocketAddressString(s) + ":" + poolId + ":" + blockId;
   }
 }
@@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
-
+import org.apache.hadoop.net.NetUtils;
 import java.net.InetSocketAddress;

 /**

@@ -125,8 +125,9 @@ public class DatanodeID implements Comparable<DatanodeID> {
   }

   public void setIpAddr(String ipAddr) {
+    this.ipAddr = ipAddr;
     //updated during registration, preserve former xferPort
-    setIpAndXferPort(ipAddr, getByteString(ipAddr), xferPort);
+    setIpAndXferPort(this.ipAddr, getByteString(ipAddr), xferPort);
   }

   private void setIpAndXferPort(String ipAddr, ByteString ipAddrBytes,

@@ -135,7 +136,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
     this.ipAddr = ipAddr;
     this.ipAddrBytes = ipAddrBytes;
     this.xferPort = xferPort;
-    this.xferAddr = ipAddr + ":" + xferPort;
+    this.xferAddr = NetUtils.getIPPortString(ipAddr, xferPort);
   }

   public void setPeerHostName(String peerHostName) {

@@ -201,21 +202,21 @@ public class DatanodeID implements Comparable<DatanodeID> {
    * @return IP:ipcPort string
    */
   private String getIpcAddr() {
-    return ipAddr + ":" + ipcPort;
+    return NetUtils.getIPPortString(ipAddr, ipcPort);
   }

   /**
    * @return IP:infoPort string
    */
   public String getInfoAddr() {
-    return ipAddr + ":" + infoPort;
+    return NetUtils.getIPPortString(ipAddr, infoPort);
   }

   /**
    * @return IP:infoPort string
    */
   public String getInfoSecureAddr() {
-    return ipAddr + ":" + infoSecurePort;
+    return NetUtils.getIPPortString(ipAddr, infoSecurePort);
   }

   /**

@@ -299,6 +300,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
    * Note that this does not update storageID.
    */
   public void updateRegInfo(DatanodeID nodeReg) {
+    ipAddr = nodeReg.getIpAddr();
     setIpAndXferPort(nodeReg.getIpAddr(), nodeReg.getIpAddrBytes(),
         nodeReg.getXferPort());
     hostName = nodeReg.getHostName();
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncr
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SaslPropertiesResolver;
 import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.slf4j.Logger;

@@ -60,7 +61,6 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 import org.apache.hadoop.thirdparty.protobuf.ByteString;

 /**

@@ -157,11 +157,8 @@ public final class DataTransferSaslUtil {
    * @return InetAddress from peer
    */
   public static InetAddress getPeerAddress(Peer peer) {
-    String remoteAddr = peer.getRemoteAddressString().split(":")[0];
-    int slashIdx = remoteAddr.indexOf('/');
-    return InetAddresses.forString(slashIdx != -1 ?
-        remoteAddr.substring(slashIdx + 1, remoteAddr.length()) :
-        remoteAddr);
+    String remoteAddr = peer.getRemoteAddressString();
+    return NetUtils.getInetAddressFromInetSocketAddressString(remoteAddr);
   }

   /**
@@ -305,7 +305,7 @@ public class JsonUtilClient {
     if (ipAddr == null) {
       String name = getString(m, "name", null);
       if (name != null) {
-        int colonIdx = name.indexOf(':');
+        int colonIdx = name.lastIndexOf(':');
         if (colonIdx > 0) {
           ipAddr = name.substring(0, colonIdx);
           xferPort = Integer.parseInt(name.substring(colonIdx +1));
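`lastIndexOf(':')` is the recurring fix across this branch: in a `host:port` string the last colon is the only safe split point once unbracketed IPv6 literals can appear. A tiny illustration (hypothetical values):

```java
public class LastColonDemo {
  public static void main(String[] args) {
    String v4 = "192.168.1.10:9866";
    String v6 = "2001:db8::1:9866"; // unbracketed IPv6 literal plus port

    System.out.println(v4.substring(0, v4.indexOf(':')));     // 192.168.1.10
    System.out.println(v6.substring(0, v6.indexOf(':')));     // 2001  (wrong)
    System.out.println(v6.substring(0, v6.lastIndexOf(':'))); // 2001:db8::1
  }
}
```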
@@ -54,12 +54,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.StopWatch;

 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;

@@ -709,8 +709,7 @@ public class IPCLoggerChannel implements AsyncLogger {

   @Override
   public String toString() {
-    return InetAddresses.toAddrString(addr.getAddress()) + ':' +
-        addr.getPort();
+    return NetUtils.getHostPortString(addr);
   }

   @Override
@@ -56,6 +56,7 @@ import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.http.conn.util.InetAddressUtils;
 import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;

@@ -1522,7 +1523,13 @@ public class DatanodeManager {
     DatanodeID dnId;
     String hostStr;
     int port;
-    int idx = hostLine.indexOf(':');
+    int idx;
+
+    if (InetAddressUtils.isIPv6StdAddress(hostLine)) {
+      idx = -1;
+    } else {
+      idx = hostLine.lastIndexOf(':');
+    }

     if (-1 == idx) {
       hostStr = hostLine;
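The host-line parsing now treats a bare IPv6 literal as having no port. Note that `InetAddressUtils.isIPv6StdAddress` matches only the full eight-group form, so compressed literals fall through to the `lastIndexOf` branch. A sketch of the rule (hypothetical host lines):

```java
import org.apache.http.conn.util.InetAddressUtils;

public class HostLineDemo {
  public static void main(String[] args) {
    String[] lines = {
        "dn1.example.com:9866",   // host:port -> split on last colon
        "2001:db8:0:0:0:0:0:1",   // full-form IPv6 literal -> no port
        "[2001:db8::1]:9866"      // bracketed literal -> last colon is safe
    };
    for (String hostLine : lines) {
      int idx = InetAddressUtils.isIPv6StdAddress(hostLine)
          ? -1 : hostLine.lastIndexOf(':');
      System.out.println(hostLine + " -> host = "
          + (idx == -1 ? hostLine : hostLine.substring(0, idx)));
    }
  }
}
```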
@@ -23,12 +23,11 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;

 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.HashSet;

 /**

@@ -89,16 +88,14 @@ public class HostFileManager extends HostConfigManager {
   @VisibleForTesting
   static InetSocketAddress parseEntry(String type, String fn, String line) {
     try {
-      URI uri = new URI("dummy", line, null, null, null);
-      int port = uri.getPort() == -1 ? 0 : uri.getPort();
-      InetSocketAddress addr = new InetSocketAddress(uri.getHost(), port);
+      InetSocketAddress addr = NetUtils.createSocketAddr(line, 0);
       if (addr.isUnresolved()) {
         LOG.warn(String.format("Failed to resolve address `%s` in `%s`. " +
             "Ignoring in the %s list.", line, fn, type));
         return null;
       }
       return addr;
-    } catch (URISyntaxException e) {
+    } catch (IllegalArgumentException e) {
       LOG.warn(String.format("Failed to parse `%s` in `%s`. " + "Ignoring in " +
           "the %s list.", line, fn, type));
     }
@@ -80,11 +80,21 @@ public class BlockPoolSliceStorage extends Storage {
    * progress. Do not delete the 'previous' directory.
    */
   static final String ROLLING_UPGRADE_MARKER_FILE = "RollingUpgradeInProgress";
+  private static final String BLOCK_POOL_ID_IPV4_PATTERN_BASE =
+      "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}";
+
+  // Because we don't support ":" in path BlockPoolID on IPv6 boxes we replace
+  // ":" with ".".
+  // Also format of IPv6 is less fixed so we surround it with square brackets
+  // and just check that match
+  private static final String BLOCK_POOL_ID_IPV6_PATTERN_BASE =
+      Pattern.quote("[") + "(?:.*)" + Pattern.quote("]");

   private static final String BLOCK_POOL_ID_PATTERN_BASE =
-      Pattern.quote(File.separator) +
-      "BP-\\d+-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d+" +
-      Pattern.quote(File.separator);
+      Pattern.quote(File.separator) + "BP-\\d+-(?:"
+          + BLOCK_POOL_ID_IPV4_PATTERN_BASE + "|"
+          + BLOCK_POOL_ID_IPV6_PATTERN_BASE + ")-\\d+" + Pattern
+          .quote(File.separator);

   private static final Pattern BLOCK_POOL_PATH_PATTERN = Pattern.compile(
       "^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(.*)$");
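The widened block-pool pattern accepts either a dotted IPv4 literal or a bracketed, dot-substituted IPv6-derived ID. A quick check of the composite regex, assembled the same way as above (paths are examples, Unix `File.separator` assumed):

```java
import java.io.File;
import java.util.regex.Pattern;

public class BlockPoolPatternDemo {
  public static void main(String[] args) {
    String ipv4 = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}";
    String ipv6 = Pattern.quote("[") + "(?:.*)" + Pattern.quote("]");
    Pattern p = Pattern.compile("^(.*)(" + Pattern.quote(File.separator)
        + "BP-\\d+-(?:" + ipv4 + "|" + ipv6 + ")-\\d+"
        + Pattern.quote(File.separator) + ")(.*)$");

    // IPv4-derived ID, and an IPv6-derived ID with ":" already replaced by "."
    System.out.println(p.matcher("/data/BP-1-127.0.0.1-42/current").matches());     // true
    System.out.println(p.matcher("/data/BP-1-[2001.db8..1]-42/current").matches()); // true
  }
}
```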
@@ -147,7 +147,7 @@ class DataXceiver extends Receiver implements Runnable {
     this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
     this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
     remoteAddress = peer.getRemoteAddressString();
-    final int colonIdx = remoteAddress.indexOf(':');
+    final int colonIdx = remoteAddress.lastIndexOf(':');
     remoteAddressWithoutPort =
         (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
     localAddress = peer.getLocalAddressString();
@@ -103,9 +103,9 @@ class Checkpointer extends Daemon {
     checkpointConf = new CheckpointConf(conf);

     // Pull out exact http address for posting url to avoid ip aliasing issues
-    String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, 
-                                   DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
-    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
+    String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
+        DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
+    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.lastIndexOf(":"));

     LOG.info("Checkpoint Period : " +
         checkpointConf.getPeriod() + " secs " +
@@ -56,6 +56,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
+import org.apache.http.conn.util.InetAddressUtils;
 import org.eclipse.jetty.util.ajax.JSON;

 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;

@@ -1024,6 +1025,10 @@ public class NNStorage extends Storage implements Closeable,
     String ip;
     try {
       ip = DNS.getDefaultIP("default");
+      if (InetAddressUtils.isIPv6StdAddress(ip)) {
+        // HDFS doesn't support ":" in path, replace it with "."
+        ip = "[" + ip.replaceAll(":", ".") + "]";
+      }
     } catch (UnknownHostException e) {
       LOG.warn("Could not find ip address of \"default\" inteface.");
       throw e;
@@ -117,6 +117,7 @@ import org.apache.hadoop.util.StringUtils;

 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;
 import com.sun.jersey.spi.container.ResourceFilters;

 /** Web-hdfs NameNode implementation. */

@@ -273,22 +274,22 @@ public class NamenodeWebHdfsMethods {

     HashSet<Node> excludes = new HashSet<Node>();
     if (excludeDatanodes != null) {
-      for (String host : StringUtils
+      for (String hostAndPort : StringUtils
           .getTrimmedStringCollection(excludeDatanodes)) {
-        int idx = host.indexOf(":");
+        HostAndPort hp = HostAndPort.fromString(hostAndPort);
         Node excludeNode = null;
-        if (idx != -1) {
-          excludeNode = bm.getDatanodeManager().getDatanodeByXferAddr(
-              host.substring(0, idx), Integer.parseInt(host.substring(idx + 1)));
+        if (hp.hasPort()) {
+          excludeNode = bm.getDatanodeManager()
+              .getDatanodeByXferAddr(hp.getHost(), hp.getPort());
         } else {
-          excludeNode = bm.getDatanodeManager().getDatanodeByHost(host);
+          excludeNode = bm.getDatanodeManager().getDatanodeByHost(hostAndPort);
         }

         if (excludeNode != null) {
           excludes.add(excludeNode);
         } else {
           LOG.debug("DataNode {} was requested to be excluded, "
-              + "but it was not found.", host);
+              + "but it was not found.", hostAndPort);
         }
       }
     }
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;

@@ -245,7 +246,7 @@ public class GetConf extends Configured implements Tool {
       if (!cnnlist.isEmpty()) {
         for (ConfiguredNNAddress cnn : cnnlist) {
           InetSocketAddress rpc = cnn.getAddress();
-          tool.printOut(rpc.getHostName()+":"+rpc.getPort());
+          tool.printOut(NetUtils.getHostPortString(rpc));
         }
         return 0;
       }
@@ -34,6 +34,7 @@ import io.netty.handler.codec.string.StringEncoder;
 import io.netty.util.concurrent.GlobalEventExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -122,7 +123,9 @@ public class WebImageViewer implements Closeable {
     allChannels.add(channel);

     address = (InetSocketAddress) channel.localAddress();
-    LOG.info("WebImageViewer started. Listening on " + address.toString() + ". Press Ctrl+C to stop the viewer.");
+    LOG.info("WebImageViewer started. Listening on " + NetUtils
+        .getSocketAddressString(address) +
+        ". Press Ctrl+C to stop the viewer.");
   }

   /**
@@ -227,8 +227,10 @@
       var n = nodes[i];
       n.usedPercentage = Math.round((n.used + n.nonDfsUsedSpace) * 1.0 / n.capacity * 100);

-      var port = n.infoAddr.split(":")[1];
-      var securePort = n.infoSecureAddr.split(":")[1];
+      var array = n.infoAddr.split(":");
+      var port = array[array.length-1];
+      array = n.infoSecureAddr.split(":");
+      var securePort = array[array.length-1];
       var dnHost = n.name.split(":")[0];
       n.dnWebAddress = "http://" + dnHost + ":" + port;
       if (securePort != 0) {
@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;

@@ -55,7 +56,7 @@ public class TestDFSAddressConfig {
     ArrayList<DataNode> dns = cluster.getDataNodes();
     DataNode dn = dns.get(0);

-    String selfSocketAddr = dn.getXferAddress().toString();
+    String selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));

@@ -80,7 +81,7 @@ public class TestDFSAddressConfig {
     dns = cluster.getDataNodes();
     dn = dns.get(0);

-    selfSocketAddr = dn.getXferAddress().toString();
+    selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 127.0.0.1
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));

@@ -105,10 +106,11 @@ public class TestDFSAddressConfig {
     dns = cluster.getDataNodes();
     dn = dns.get(0);

-    selfSocketAddr = dn.getXferAddress().toString();
+    selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 0.0.0.0
-    assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
+    assertTrue(selfSocketAddr.contains("/0.0.0.0:") ||
+        selfSocketAddr.contains("/[0:0:0:0:0:0:0:0]:"));

     cluster.shutdown();
   }
@@ -541,7 +541,7 @@ public class TestDFSUtil {
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"),
NS2_NN1_HOST);
conf.set(DFSUtil.addKeySuffixes(
DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
NS2_NN2_HOST);

Map<String, Map<String, InetSocketAddress>> map =

@@ -550,17 +550,21 @@ public class TestDFSUtil {
assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
assertTrue(HAUtil.isHAEnabled(conf, "ns2"));
assertFalse(HAUtil.isHAEnabled(conf, "ns3"));

assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());

assertEquals(NS1_NN1_HOST,
NetUtils.getHostPortString(map.get("ns1").get("ns1-nn1")));
assertEquals(NS1_NN2_HOST,
NetUtils.getHostPortString(map.get("ns1").get("ns1-nn2")));
assertEquals(NS2_NN1_HOST,
NetUtils.getHostPortString(map.get("ns2").get("ns2-nn1")));
assertEquals(NS2_NN2_HOST,
NetUtils.getHostPortString(map.get("ns2").get("ns2-nn2")));

assertEquals(NS1_NN1_HOST,
DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
assertEquals(NS1_NN2_HOST,
DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
assertEquals(NS2_NN1_HOST,
DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));

// No nameservice was given and we can't determine which service addr

@@ -630,8 +634,29 @@ public class TestDFSUtil {
Map<String, Map<String, InetSocketAddress>> map =
DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");

assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
assertEquals(NS1_NN1_ADDR,
NetUtils.getHostPortString(map.get("ns1").get("nn1")));
assertEquals(NS1_NN2_ADDR,
NetUtils.getHostPortString(map.get("ns1").get("nn2")));
}

@Test
public void testIPv6GetHaNnHttpAddresses() throws IOException {
final String logicalHostName = "ns1";
final String ns1Nn1Addr = "[0:0:0:0:0:b00c:c0a8:12a]:8020";
final String ns1Nn2Addr = "[::face:a0b:182a]:8020";

Configuration conf =
createWebHDFSHAConfiguration(logicalHostName, ns1Nn1Addr,
ns1Nn2Addr);

Map<String, Map<String, InetSocketAddress>> map =
DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");

assertEquals(ns1Nn1Addr,
NetUtils.getHostPortString(map.get("ns1").get("nn1")));
assertEquals(ns1Nn2Addr.replace("::", "0:0:0:0:0:"),
NetUtils.getHostPortString(map.get("ns1").get("nn2")));
}

private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {

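Editor's note: the new test's String.replace("::", "0:0:0:0:0:") is a shortcut that works for this particular literal; real normalization comes from parsing. Java expands an abbreviated IPv6 literal when asked for the host address, as this sketch shows (Guava's InetAddresses is already used elsewhere in the patch):

    import com.google.common.net.InetAddresses;
    import java.net.InetAddress;

    public class Ipv6Normalize {
      public static void main(String[] args) {
        InetAddress a = InetAddresses.forString("::face:a0b:182a");
        // Java prints the fully expanded, lower-case form:
        System.out.println(a.getHostAddress()); // 0:0:0:0:0:face:a0b:182a
      }
    }
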
@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.Time;

@@ -554,7 +555,8 @@ public class TestFileAppend{

// stop one datanode
DataNodeProperties dnProp = cluster.stopDataNode(0);
String dnAddress = dnProp.datanode.getXferAddress().toString();
String dnAddress = NetUtils.getSocketAddressString(
dnProp.datanode.getXferAddress());
if (dnAddress.startsWith("/")) {
dnAddress = dnAddress.substring(1);
}

@@ -609,7 +611,8 @@ public class TestFileAppend{

// stop one datanode
DataNodeProperties dnProp = cluster.stopDataNode(0);
String dnAddress = dnProp.datanode.getXferAddress().toString();
String dnAddress = NetUtils
.getSocketAddressString(dnProp.datanode.getXferAddress());
if (dnAddress.startsWith("/")) {
dnAddress = dnAddress.substring(1);
}

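Editor's note: the startsWith("/") trimming exists because java.net address toString() is "hostname/literal:port", and when no hostname is attached the string begins with a bare slash. A quick check:

    import java.net.InetSocketAddress;

    public class SlashPrefixDemo {
      public static void main(String[] args) {
        // Built from a literal, the address has no hostname yet, so
        // toString() leads with '/' (exact output can vary by resolver state).
        InetSocketAddress a = new InetSocketAddress("127.0.0.1", 50010);
        System.out.println(a); // /127.0.0.1:50010
      }
    }
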
@@ -92,11 +92,14 @@ import org.apache.hadoop.util.Time;
import org.junit.Assert;
import org.junit.Test;
import org.slf4j.event.Level;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

/**
* This class tests various cases during file creation.
*/
public class TestFileCreation {
public static final Log LOG = LogFactory.getLog(TestFileCreation.class);
static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";

{

@@ -125,7 +128,7 @@ public class TestFileCreation {
// creates a file but does not close it
public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
throws IOException {
System.out.println("createFile: Created " + name + " with " + repl + " replica.");
LOG.info("createFile: Created " + name + " with " + repl + " replica.");
FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
(short) repl, blockSize);

@@ -305,8 +308,8 @@ public class TestFileCreation {
public void testFileCreationSetLocalInterface() throws IOException {
assumeTrue(System.getProperty("os.name").startsWith("Linux"));

// The mini cluster listens on the loopback so we can use it here
checkFileCreation("lo", false);
// Use wildcard address to force interface to be used
checkFileCreation("0.0.0.0", false);

try {
checkFileCreation("bogus-interface", false);

@@ -348,9 +351,9 @@ public class TestFileCreation {
// check that / exists
//
Path path = new Path("/");
System.out.println("Path : \"" + path.toString() + "\"");
System.out.println(fs.getFileStatus(path).isDirectory());
assertTrue("/ should be a directory",
LOG.info("Path : \"" + path.toString() + "\"");
LOG.info(fs.getFileStatus(path).isDirectory());
assertTrue("/ should be a directory",
fs.getFileStatus(path).isDirectory());

//

@@ -358,8 +361,8 @@ public class TestFileCreation {
//
Path dir1 = new Path("/test_dir");
fs.mkdirs(dir1);
System.out.println("createFile: Creating " + dir1.getName() +
" for overwrite of existing directory.");
LOG.info("createFile: Creating " + dir1.getName()
+ " for overwrite of existing directory.");
try {
fs.create(dir1, true); // Create path, overwrite=true
fs.close();

@@ -379,9 +382,9 @@ public class TestFileCreation {
FSDataOutputStream stm = createFile(fs, file1, 1);

// verify that file exists in FS namespace
assertTrue(file1 + " should be a file",
fs.getFileStatus(file1).isFile());
System.out.println("Path : \"" + file1 + "\"");
LOG.info("Path : \"" + file1 + "\"");

// write to file
writeFile(stm);

@@ -393,13 +396,13 @@ public class TestFileCreation {
assertTrue(file1 + " should be of size " + fileSize +
" but found to be of size " + len,
len == fileSize);

// verify the disk space the file occupied
long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
"but found to take " + diskSpace + " bytes", fileSize, diskSpace);

// Check storage usage
// can't check capacities for real storage since the OS file system may be changing under us.
if (simulatedStorage) {
DataNode dn = cluster.getDataNodes().get(0);

@@ -436,7 +439,7 @@ public class TestFileCreation {
FSDataOutputStream stm1 = createFile(fs, file1, 1);
FSDataOutputStream stm2 = createFile(fs, file2, 1);
FSDataOutputStream stm3 = createFile(localfs, file3, 1);
System.out.println("DeleteOnExit: Created files.");
LOG.info("DeleteOnExit: Created files.");

// write to files and close. Purposely, do not close file2.
writeFile(stm1);

@@ -467,7 +470,7 @@ public class TestFileCreation {
!fs.exists(file2));
assertTrue(file3 + " still exists in spite of deleteOnExit set.",
!localfs.exists(file3));
System.out.println("DeleteOnExit successful.");
LOG.info("DeleteOnExit successful.");

} finally {
IOUtils.closeStream(fs);

@@ -563,7 +566,7 @@ public class TestFileCreation {
// verify that file exists in FS namespace
assertTrue(file1 + " should be a file",
fs.getFileStatus(file1).isFile());
System.out.println("Path : \"" + file1 + "\"");
LOG.info("Path : \"" + file1 + "\"");

// kill the datanode
cluster.shutdownDataNodes();

@@ -575,7 +578,7 @@ public class TestFileCreation {
if (info.length == 0) {
break;
}
System.out.println("testFileCreationError1: waiting for datanode " +
LOG.info("testFileCreationError1: waiting for datanode " +
" to die.");
try {
Thread.sleep(1000);

@@ -597,7 +600,7 @@ public class TestFileCreation {
// bad block allocations were cleaned up earlier.
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
LOG.info("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up",
locations.locatedBlockCount() == 0);
} finally {

@@ -613,7 +616,7 @@ public class TestFileCreation {
@Test
public void testFileCreationError2() throws IOException {
long leasePeriod = 1000;
System.out.println("testFileCreationError2 start");
LOG.info("testFileCreationError2 start");
Configuration conf = new HdfsConfiguration();
conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);

@@ -632,24 +635,24 @@ public class TestFileCreation {
//
Path file1 = new Path("/filestatus.dat");
createFile(dfs, file1, 1);
System.out.println("testFileCreationError2: "
LOG.info("testFileCreationError2: "
+ "Created file filestatus.dat with one replica.");

LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("testFileCreationError2: "
LOG.info("testFileCreationError2: "
+ "The file has " + locations.locatedBlockCount() + " blocks.");

// add one block to the file
LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
System.out.println("testFileCreationError2: "
LOG.info("testFileCreationError2: "
+ "Added block " + location.getBlock());

locations = client.getNamenode().getBlockLocations(file1.toString(),
0, Long.MAX_VALUE);
int count = locations.locatedBlockCount();
System.out.println("testFileCreationError2: "
LOG.info("testFileCreationError2: "
+ "The file now has " + count + " blocks.");

// set the soft and hard limit to be 1 second so that the

@@ -665,10 +668,10 @@ public class TestFileCreation {
// verify that the last block was synchronized.
locations = client.getNamenode().getBlockLocations(file1.toString(),
0, Long.MAX_VALUE);
System.out.println("testFileCreationError2: "
LOG.info("testFileCreationError2: "
+ "locations = " + locations.locatedBlockCount());
assertEquals(0, locations.locatedBlockCount());
System.out.println("testFileCreationError2 successful");
LOG.info("testFileCreationError2 successful");
} finally {
IOUtils.closeStream(dfs);
cluster.shutdown();

@@ -678,7 +681,7 @@ public class TestFileCreation {
/** test addBlock(..) when replication<min and excludeNodes==null. */
@Test
public void testFileCreationError3() throws IOException {
System.out.println("testFileCreationError3 start");
LOG.info("testFileCreationError3 start");
Configuration conf = new HdfsConfiguration();
// create cluster
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

@@ -699,7 +702,7 @@ public class TestFileCreation {
FileSystem.LOG.info("GOOD!", ioe);
}

System.out.println("testFileCreationError3 successful");
LOG.info("testFileCreationError3 successful");
} finally {
IOUtils.closeStream(dfs);
cluster.shutdown();

@@ -732,7 +735,7 @@ public class TestFileCreation {
// create a new file.
Path file1 = new Path("/filestatus.dat");
HdfsDataOutputStream stm = create(fs, file1, 1);
System.out.println("testFileCreationNamenodeRestart: "
LOG.info("testFileCreationNamenodeRestart: "
+ "Created file " + file1);
assertEquals(file1 + " should be replicated to 1 datanode.", 1,
stm.getCurrentBlockReplication());

@@ -746,7 +749,7 @@ public class TestFileCreation {
// rename file while keeping it open.
Path fileRenamed = new Path("/filestatusRenamed.dat");
fs.rename(file1, fileRenamed);
System.out.println("testFileCreationNamenodeRestart: "
LOG.info("testFileCreationNamenodeRestart: "
+ "Renamed file " + file1 + " to " +
fileRenamed);
file1 = fileRenamed;

@@ -755,7 +758,7 @@ public class TestFileCreation {
//
Path file2 = new Path("/filestatus2.dat");
FSDataOutputStream stm2 = createFile(fs, file2, 1);
System.out.println("testFileCreationNamenodeRestart: "
LOG.info("testFileCreationNamenodeRestart: "
+ "Created file " + file2);

// create yet another new file with full path name.

@@ -763,21 +766,21 @@ public class TestFileCreation {
//
Path file3 = new Path("/user/home/fullpath.dat");
FSDataOutputStream stm3 = createFile(fs, file3, 1);
System.out.println("testFileCreationNamenodeRestart: "
LOG.info("testFileCreationNamenodeRestart: "
+ "Created file " + file3);
Path file4 = new Path("/user/home/fullpath4.dat");
FSDataOutputStream stm4 = createFile(fs, file4, 1);
System.out.println("testFileCreationNamenodeRestart: "
LOG.info("testFileCreationNamenodeRestart: "
+ "Created file " + file4);

fs.mkdirs(new Path("/bin"));
fs.rename(new Path("/user/home"), new Path("/bin"));
Path file3new = new Path("/bin/home/fullpath.dat");
System.out.println("testFileCreationNamenodeRestart: "
LOG.info("testFileCreationNamenodeRestart: "
+ "Renamed file " + file3 + " to " +
file3new);
Path file4new = new Path("/bin/home/fullpath4.dat");
System.out.println("testFileCreationNamenodeRestart: "
LOG.info("testFileCreationNamenodeRestart: "
+ "Renamed file " + file4 + " to " +
file4new);

@@ -837,14 +840,14 @@ public class TestFileCreation {
DFSClient client = fs.dfs;
LocatedBlocks locations = client.getNamenode().getBlockLocations(
file1.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
LOG.info("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file1,
locations.locatedBlockCount() == 3);

// verify filestatus2.dat
locations = client.getNamenode().getBlockLocations(
file2.toString(), 0, Long.MAX_VALUE);
System.out.println("locations = " + locations.locatedBlockCount());
LOG.info("locations = " + locations.locatedBlockCount());
assertTrue("Error blocks were not cleaned up for file " + file2,
locations.locatedBlockCount() == 1);
} finally {

@@ -859,7 +862,7 @@ public class TestFileCreation {
@Test
public void testDFSClientDeath() throws IOException, InterruptedException {
Configuration conf = new HdfsConfiguration();
System.out.println("Testing abnormal client death.");
LOG.info("Testing abnormal client death.");
if (simulatedStorage) {
SimulatedFSDataset.setFactory(conf);
}

@@ -873,7 +876,7 @@ public class TestFileCreation {
//
Path file1 = new Path("/clienttest.dat");
FSDataOutputStream stm = createFile(fs, file1, 1);
System.out.println("Created file clienttest.dat");
LOG.info("Created file clienttest.dat");

// write to file
writeFile(stm);

@@ -889,7 +892,7 @@ public class TestFileCreation {
cluster.shutdown();
}
}

/**
* Test file creation using createNonRecursive().
*/

@@ -970,7 +973,7 @@ public class TestFileCreation {
static IOException createNonRecursive(FileSystem fs, Path name,
int repl, EnumSet<CreateFlag> flag) throws IOException {
try {
System.out.println("createNonRecursive: Attempting to create " + name +
LOG.info("createNonRecursive: Attempting to create " + name +
" with " + repl + " replica.");
int bufferSize = fs.getConf()
.getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);

@@ -1004,9 +1007,9 @@ public class TestFileCreation {

try {
FileSystem fs = cluster.getFileSystem();

Path[] p = {new Path("/foo"), new Path("/bar")};

//write 2 files at the same time
FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
int i = 0;

@@ -1038,9 +1041,9 @@ public class TestFileCreation {

try {
FileSystem fs = cluster.getFileSystem();

Path[] p = {new Path("/foo"), new Path("/bar")};

//write 2 files at the same time
FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
int i = 0;

@@ -1068,7 +1071,7 @@ public class TestFileCreation {
*/
@Test
public void testLeaseExpireHardLimit() throws Exception {
System.out.println("testLeaseExpireHardLimit start");
LOG.info("testLeaseExpireHardLimit start");
final long leasePeriod = 1000;
final int DATANODE_NUM = 3;

@@ -1113,20 +1116,20 @@ public class TestFileCreation {
successcount++;
}
}
System.out.println("successcount=" + successcount);
assertTrue(successcount > 0);
LOG.info("successcount=" + successcount);
assertTrue(successcount > 0);
} finally {
IOUtils.closeStream(dfs);
cluster.shutdown();
}

System.out.println("testLeaseExpireHardLimit successful");
LOG.info("testLeaseExpireHardLimit successful");
}

// test closing file system before all file handles are closed.
@Test
public void testFsClose() throws Exception {
System.out.println("test file system close start");
LOG.info("test file system close start");
final int DATANODE_NUM = 3;

Configuration conf = new HdfsConfiguration();

@@ -1147,7 +1150,7 @@ public class TestFileCreation {
// close file system without closing file
dfs.close();
} finally {
System.out.println("testFsClose successful");
LOG.info("testFsClose successful");
cluster.shutdown();
}
}

@@ -1155,7 +1158,7 @@ public class TestFileCreation {
// test closing file after cluster is shutdown
@Test
public void testFsCloseAfterClusterShutdown() throws IOException {
System.out.println("test testFsCloseAfterClusterShutdown start");
LOG.info("test testFsCloseAfterClusterShutdown start");
final int DATANODE_NUM = 3;

Configuration conf = new HdfsConfiguration();

@@ -1186,13 +1189,13 @@ public class TestFileCreation {
boolean hasException = false;
try {
out.close();
System.out.println("testFsCloseAfterClusterShutdown: Error here");
LOG.info("testFsCloseAfterClusterShutdown: Error here");
} catch (IOException e) {
hasException = true;
}
assertTrue("Failed to close file after cluster shutdown", hasException);
} finally {
System.out.println("testFsCloseAfterClusterShutdown successful");
LOG.info("testFsCloseAfterClusterShutdown successful");
if (cluster != null) {
cluster.shutdown();
}

@@ -1211,7 +1214,7 @@ public class TestFileCreation {
public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
doCreateTest(CreationMethod.DIRECT_NN_RPC);
}

/**
* Another regression test for HDFS-3626. This one creates files using
* a Path instantiated from a string object.

@@ -1231,7 +1234,7 @@ public class TestFileCreation {
throws Exception {
doCreateTest(CreationMethod.PATH_FROM_URI);
}

private enum CreationMethod {
DIRECT_NN_RPC,
PATH_FROM_URI,

@@ -1246,7 +1249,7 @@ public class TestFileCreation {
NamenodeProtocols nnrpc = cluster.getNameNodeRpc();

for (String pathStr : NON_CANONICAL_PATHS) {
System.out.println("Creating " + pathStr + " by " + method);
LOG.info("Creating " + pathStr + " by " + method);
switch (method) {
case DIRECT_NN_RPC:
try {

@@ -1261,7 +1264,7 @@ public class TestFileCreation {
// So, we expect all of them to fail.
}
break;

case PATH_FROM_URI:
case PATH_FROM_STRING:
// Unlike the above direct-to-NN case, we expect these to succeed,

@@ -1279,7 +1282,7 @@ public class TestFileCreation {
throw new AssertionError("bad method: " + method);
}
}

cluster.restartNameNode();

} finally {

@@ -1336,7 +1339,7 @@ public class TestFileCreation {
dfs.mkdirs(new Path("/foo/dir"));
String file = "/foo/dir/file";
Path filePath = new Path(file);

// Case 1: Create file with overwrite, check the blocks of old file
// are cleaned after creating with overwrite
NameNode nn = cluster.getNameNode();

@@ -1350,7 +1353,7 @@ public class TestFileCreation {
} finally {
out.close();
}

LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(
nn, file, 0, fileSize);
assertBlocks(bm, oldBlocks, true);

@@ -1363,7 +1366,7 @@ public class TestFileCreation {
out.close();
}
dfs.deleteOnExit(filePath);

LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(
nn, file, 0, fileSize);
assertBlocks(bm, newBlocks, true);

@@ -1377,7 +1380,7 @@ public class TestFileCreation {
in.close();
}
Assert.assertArrayEquals(newData, result);

// Case 2: Restart NN, check the file
cluster.restartNameNode();
nn = cluster.getNameNode();

@@ -1388,13 +1391,13 @@ public class TestFileCreation {
in.close();
}
Assert.assertArrayEquals(newData, result);

// Case 3: Save new checkpoint and restart NN, check the file
NameNodeAdapter.enterSafeMode(nn, false);
NameNodeAdapter.saveNamespace(nn);
cluster.restartNameNode();
nn = cluster.getNameNode();

in = dfs.open(filePath);
try {
result = readAll(in);

@@ -1411,8 +1414,8 @@ public class TestFileCreation {
}
}
}

private void assertBlocks(BlockManager bm, LocatedBlocks lbs,
boolean exist) {
for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
if (exist) {

@@ -1424,7 +1427,7 @@ public class TestFileCreation {
}
}
}

private byte[] readAll(FSDataInputStream in) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
byte[] buffer = new byte[1024];

@@ -195,7 +195,8 @@ public class BlockReaderTestUtil {
return new BlockReaderFactory(fs.getClient().getConf()).
setInetSocketAddress(targetAddr).
setBlock(block).
setFileName(targetAddr.toString()+ ":" + block.getBlockId()).
setFileName(NetUtils.getSocketAddressString(targetAddr) + ":" + block
.getBlockId()).
setBlockToken(testBlock.getBlockToken()).
setStartOffset(offset).
setLength(lenToRead).

@@ -921,14 +921,14 @@ public class TestQuorumJournalManager {
GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
"3");
}

@Test
public void testToString() throws Exception {
GenericTestUtils.assertMatches(
qjm.toString(),
"QJM to \\[127.0.0.1:\\d+, 127.0.0.1:\\d+, 127.0.0.1:\\d+\\]");
"QJM to \\[localhost:\\d+, localhost:\\d+, localhost:\\d+\\]");
}

@Test
public void testSelectInputStreamsNotOnBoundary() throws Exception {
final int txIdsPerSegment = 10;

@@ -110,13 +110,19 @@ public class TestHostFileManager {
includedNodes.add(entry("127.0.0.1:12345"));
includedNodes.add(entry("localhost:12345"));
includedNodes.add(entry("127.0.0.1:12345"));

includedNodes.add(entry("[::1]:42"));
includedNodes.add(entry("[0:0:0:0:0:0:0:1]:42"));
includedNodes.add(entry("[::1]:42"));

includedNodes.add(entry("127.0.0.2"));

excludedNodes.add(entry("127.0.0.1:12346"));
excludedNodes.add(entry("127.0.30.1:12346"));
excludedNodes.add(entry("[::1]:24"));

Assert.assertEquals(2, includedNodes.size());
Assert.assertEquals(2, excludedNodes.size());
Assert.assertEquals(3, includedNodes.size());
Assert.assertEquals(3, excludedNodes.size());

hm.refresh(includedNodes, excludedNodes);

@@ -125,20 +131,33 @@ public class TestHostFileManager {
Map<String, DatanodeDescriptor> dnMap = (Map<String,
DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");

// After the de-duplication, there should be only one DN from the included
// After the de-duplication, there should be three DN from the included
// nodes declared as dead.
Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.ALL).size());
Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
"localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
"127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
.DatanodeReportType.DEAD).size());
Assert.assertEquals(3,
dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL)
.size());
Assert.assertEquals(3,
dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
.size());
dnMap.put("uuid-foo", new DatanodeDescriptor(
new DatanodeID("127.0.0.1", "localhost", "uuid-foo", 12345, 1020, 1021,
1022)));
Assert.assertEquals(2,
dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
.size());
dnMap.put("uuid-bar", new DatanodeDescriptor(
new DatanodeID("127.0.0.2", "127.0.0.2", "uuid-bar", 12345, 1020, 1021,
1022)));
Assert.assertEquals(1,
dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
.size());
dnMap.put("uuid-baz", new DatanodeDescriptor(
new DatanodeID("[::1]", "localhost", "uuid-baz", 42, 1020, 1021,
1022)));
Assert.assertEquals(0,
dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
.size());

DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
DFSTestUtil.setDatanodeDead(spam);

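Editor's note: the test now feeds both "[::1]:42" and "[0:0:0:0:0:0:0:1]:42" into the include list and expects them to collapse to one entry, since they name the same endpoint once parsed. A sketch of that equivalence (string comparison alone would keep them distinct):

    import com.google.common.net.HostAndPort;
    import com.google.common.net.InetAddresses;
    import java.net.InetAddress;

    public class Ipv6Dedup {
      public static void main(String[] args) {
        HostAndPort a = HostAndPort.fromString("[::1]:42");
        HostAndPort b = HostAndPort.fromString("[0:0:0:0:0:0:0:1]:42");
        InetAddress ia = InetAddresses.forString(a.getHost());
        InetAddress ib = InetAddresses.forString(b.getHost());
        System.out.println(ia.equals(ib) && a.getPort() == b.getPort()); // true
      }
    }
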
@@ -22,6 +22,8 @@ import org.junit.Test;
import org.mockito.Mockito;

import java.io.File;
import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.Random;
import java.util.UUID;
import org.slf4j.Logger;

@@ -56,13 +58,37 @@ public class TestBlockPoolSliceStorage {
}
}

private String makeRandomIpAddress() {
private String makeRandomIpv4Address() {
return rand.nextInt(256) + "." +
rand.nextInt(256) + "." +
rand.nextInt(256) + "." +
rand.nextInt(256);
}

private String makeRandomIpv6Address() {
byte[] bytes = new byte[16];
rand.nextBytes(bytes);
InetAddress adr = null;
try {
adr = InetAddress.getByAddress("unused", bytes);
} catch (UnknownHostException uhe) {
// Should never happen
LOG.error("UnknownHostException " + uhe);
assertThat(true, is(false));
}
String addrString = adr.getHostAddress().replaceAll(":", ".");

return "[" + addrString + "]";
}

private String makeRandomIpAddress() {
if (rand.nextBoolean()) {
return makeRandomIpv4Address();
} else {
return makeRandomIpv6Address();
}
}

private String makeRandomBlockpoolId() {
return "BP-" + rand.nextInt(Integer.MAX_VALUE) +
"-" + makeRandomIpAddress() +

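Editor's note: the colon-to-dot rewrite presumably keeps the IPv6 literal from introducing a new separator inside the "BP-<rand>-<ip>-<time>" blockpool ID. What the helper yields for a fixed address:

    public class BlockpoolIdForm {
      public static void main(String[] args) throws Exception {
        java.net.InetAddress adr = java.net.InetAddress.getByName("2001:db8::1");
        // Same transformation as makeRandomIpv6Address() above:
        String addrString = adr.getHostAddress().replaceAll(":", ".");
        System.out.println("BP-12345-[" + addrString + "]-42");
        // -> BP-12345-[2001.db8.0.0.0.0.0.1]-42
      }
    }
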
@@ -151,14 +151,15 @@ public class TestHostsFiles {

HostsFileWriter hostsFileWriter = new HostsFileWriter();
hostsFileWriter.initialize(conf, "temp/decommission");
hostsFileWriter.initIncludeHosts(new String[]
{"localhost:52","127.0.0.1:7777"});
hostsFileWriter.initIncludeHosts(
new String[] {"localhost:52", "127.0.0.1:7777", "[::1]:42",
"[0:0:0:0:0:0:0:1]:24"});

MiniDFSCluster cluster = null;
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FSNamesystem ns = cluster.getNameNode().getNamesystem();
assertTrue(ns.getNumDeadDataNodes() == 2);
assertTrue(ns.getNumDeadDataNodes() == 4);
assertTrue(ns.getNumLiveDataNodes() == 0);

// Testing using MBeans

@@ -166,7 +167,7 @@ public class TestHostsFiles {
ObjectName mxbeanName = new ObjectName(
"Hadoop:service=NameNode,name=FSNamesystemState");
String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 4);
assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
} finally {
if (cluster != null) {

@@ -20,8 +20,10 @@ package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.AnyOf.anyOf;
import static org.hamcrest.core.IsNot.not;

import org.apache.hadoop.net.NetUtils;
import org.junit.Test;

import org.apache.hadoop.fs.FileUtil;

@@ -30,6 +32,8 @@ import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.test.GenericTestUtils;

import java.net.InetAddress;
import java.net.Inet6Address;
import java.io.File;
import java.io.IOException;

@@ -55,6 +59,7 @@ public class TestNameNodeRespectsBindHostKeys {
public static final Logger LOG =
LoggerFactory.getLogger(TestNameNodeRespectsBindHostKeys.class);
private static final String WILDCARD_ADDRESS = "0.0.0.0";
private static final String IPV6_WILDCARD_ADDRESS = "0:0:0:0:0:0:0:0";
private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
private static String keystoresDir;
private static String sslConfDir;

@@ -79,9 +84,9 @@ public class TestNameNodeRespectsBindHostKeys {
public void testRpcBindHostKey() throws IOException {
Configuration conf = new HdfsConfiguration();
MiniDFSCluster cluster = null;

LOG.info("Testing without " + DFS_NAMENODE_RPC_BIND_HOST_KEY);

// NN should not bind the wildcard address by default.
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

@@ -97,7 +102,7 @@ public class TestNameNodeRespectsBindHostKeys {
}

LOG.info("Testing with " + DFS_NAMENODE_RPC_BIND_HOST_KEY);

// Tell NN to bind the wildcard address.
conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);

@@ -106,13 +111,36 @@ public class TestNameNodeRespectsBindHostKeys {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = getRpcServerAddress(cluster);
assertThat("Bind address " + address + " is not wildcard.",
address, is("/" + WILDCARD_ADDRESS));
assertThat("Bind address " + address + " is not wildcard.", address,
anyOf(is("/" + WILDCARD_ADDRESS), is("/" + IPV6_WILDCARD_ADDRESS)));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}

InetAddress localAddr = InetAddress.getLocalHost();
if (localAddr instanceof Inet6Address) {
// Tell NN to bind the IPv6 wildcard address.
conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, IPV6_WILDCARD_ADDRESS);

// Verify that NN binds wildcard address now.
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = getRpcServerAddress(cluster);
assertThat("Bind address " + address + " is not wildcard.",
address, anyOf(
is("/" + WILDCARD_ADDRESS),
is("/" + IPV6_WILDCARD_ADDRESS)));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
} else {
LOG.info("Not testing IPv6 binding as IPv6 is not supported");
}
}

@Test (timeout=300000)

@@ -121,7 +149,7 @@ public class TestNameNodeRespectsBindHostKeys {
MiniDFSCluster cluster = null;

LOG.info("Testing without " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);

conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);

// NN should not bind the wildcard address by default.

@@ -140,6 +168,27 @@ public class TestNameNodeRespectsBindHostKeys {

LOG.info("Testing with " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);

InetAddress localAddr = InetAddress.getLocalHost();
if (localAddr instanceof Inet6Address) {
// Tell NN to bind the IPv6 wildcard address.
conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, IPV6_WILDCARD_ADDRESS);

// Verify that NN binds wildcard address now.
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = getRpcServerAddress(cluster);
assertThat("Bind address " + address + " is not wildcard.", address,
anyOf(is("/" + WILDCARD_ADDRESS), is("/" + IPV6_WILDCARD_ADDRESS)));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
} else {
LOG.info("Not testing IPv6 binding as IPv6 is not supported");
}

// Tell NN to bind the wildcard address.
conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);

@@ -148,8 +197,8 @@ public class TestNameNodeRespectsBindHostKeys {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = getServiceRpcServerAddress(cluster);
assertThat("Bind address " + address + " is not wildcard.",
address, is("/" + WILDCARD_ADDRESS));
assertThat("Bind address " + address + " is not wildcard.", address,
anyOf(is("/" + WILDCARD_ADDRESS), is("/" + IPV6_WILDCARD_ADDRESS)));
} finally {
if (cluster != null) {
cluster.shutdown();

@@ -211,7 +260,8 @@ public class TestNameNodeRespectsBindHostKeys {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = cluster.getNameNode().getHttpAddress().toString();
String address = NetUtils.getSocketAddressString(
cluster.getNameNode().getHttpAddress());
assertFalse("HTTP Bind address not expected to be wildcard by default.",
address.startsWith(WILDCARD_ADDRESS));
} finally {

@@ -231,7 +281,8 @@ public class TestNameNodeRespectsBindHostKeys {
conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = cluster.getNameNode().getHttpAddress().toString();
String address = NetUtils.getSocketAddressString(
cluster.getNameNode().getHttpAddress());
assertTrue("HTTP Bind address " + address + " is not wildcard.",
address.startsWith(WILDCARD_ADDRESS));
} finally {

@@ -285,7 +336,8 @@ public class TestNameNodeRespectsBindHostKeys {
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = cluster.getNameNode().getHttpsAddress().toString();
String address = NetUtils.getSocketAddressString(
cluster.getNameNode().getHttpsAddress());
assertFalse("HTTP Bind address not expected to be wildcard by default.",
address.startsWith(WILDCARD_ADDRESS));
} finally {

@@ -305,7 +357,8 @@ public class TestNameNodeRespectsBindHostKeys {
conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
String address = cluster.getNameNode().getHttpsAddress().toString();
String address = NetUtils
.getSocketAddressString(cluster.getNameNode().getHttpsAddress());
assertTrue("HTTP Bind address " + address + " is not wildcard.",
address.startsWith(WILDCARD_ADDRESS));
} finally {

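Editor's note: the anyOf(...) matchers exist because the same wildcard bind reports differently per stack: "0.0.0.0" on IPv4-only hosts, "0:0:0:0:0:0:0:0" on IPv6-enabled ones. A standalone check of that environment-dependent behavior (output varies by machine, which is the point):

    import java.net.InetSocketAddress;
    import java.net.ServerSocket;

    public class WildcardBindDemo {
      public static void main(String[] args) throws Exception {
        try (ServerSocket ss = new ServerSocket()) {
          ss.bind(new InetSocketAddress(0));  // wildcard address, ephemeral port
          // May print 0.0.0.0/0.0.0.0:NNNN or 0:0:0:0:0:0:0:0:NNNN forms
          System.out.println(ss.getLocalSocketAddress());
        }
      }
    }
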
@@ -25,7 +25,9 @@
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.AnyOf.anyOf;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;

@@ -43,14 +45,18 @@ public class TestNameNodeRpcServer {
// The name node in MiniDFSCluster only binds to 127.0.0.1.
// We can set the bind address to 0.0.0.0 to make it listen
// to all interfaces.
// On IPv4-only machines it will return that it is listening on 0.0.0.0
// On dual-stack or IPv6-only machines it will return 0:0:0:0:0:0:0:0
conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
MiniDFSCluster cluster = null;

try {
cluster = new MiniDFSCluster.Builder(conf).build();
cluster.waitActive();
assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
.getClientRpcServer().getListenerAddress().getHostName());
String listenerAddress = ((NameNodeRpcServer)cluster.getNameNodeRpc())
.getClientRpcServer().getListenerAddress().getHostName();
assertThat("Bind address " + listenerAddress + " is not wildcard.",
listenerAddress, anyOf(is("0.0.0.0"), is("0:0:0:0:0:0:0:0")));
} finally {
if (cluster != null) {
cluster.shutdown();

|
|||
includeHosts.toString());
|
||||
} else {
|
||||
HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
|
||||
for(String hostNameAndPort : hostNameAndPorts) {
|
||||
String[] hostAndPort = hostNameAndPort.split(":");
|
||||
for (String hostNameAndPort : hostNameAndPorts) {
|
||||
int i = hostNameAndPort.lastIndexOf(':');
|
||||
String port =
|
||||
hostNameAndPort.substring(hostNameAndPort.lastIndexOf(":") + 1);
|
||||
String addr = hostNameAndPort.substring(0, i);
|
||||
DatanodeAdminProperties dn = new DatanodeAdminProperties();
|
||||
dn.setHostName(hostAndPort[0]);
|
||||
dn.setPort(Integer.parseInt(hostAndPort[1]));
|
||||
dn.setHostName(addr);
|
||||
dn.setPort(Integer.parseInt(port));
|
||||
allDNs.add(dn);
|
||||
}
|
||||
CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
|
||||
|
|
|
@@ -0,0 +1,205 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;

import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.net.Peer;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
import org.apache.hadoop.net.unix.DomainSocket;
import org.junit.Test;

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.Inet4Address;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.nio.channels.ReadableByteChannel;

import static org.junit.Assert.*;

/**
* This is a very basic, very fast test to test IPv6 parsing issues
* as we find them.
* It does NOT depend on having a working IPv6 stack and should
* succeed even if run
* with "-Djava.net.preferIPv4Stack=true"
*/
public class TestIPv6FormatCompatibility {
private static final String IPV6_LOOPBACK_LONG_STRING = "0:0:0:0:0:0:0:1";
private static final String IPV6_SAMPLE_ADDRESS =
"2a03:2880:2130:cf05:face:b00c:0:1";
private static final String IPV6_LOOPBACK_SHORT_STRING = "::1";
private static final String IPV4_LOOPBACK_WITH_PORT = "127.0.0.1:10";
private static final String IPV6_LOOPBACK_WITH_PORT =
"[" + IPV6_LOOPBACK_LONG_STRING + "]:10";
private static final String IPV6_SAMPLE_WITH_PORT =
"[" + IPV6_SAMPLE_ADDRESS + "]:10";
private static final InetAddress IPV6LOOPBACK =
InetAddresses.forString(IPV6_LOOPBACK_LONG_STRING);
private static final InetAddress IPV4LOOPBACK =
Inet4Address.getLoopbackAddress();
private static final InetAddress IPV6SAMPLE =
InetAddresses.forString(IPV6_SAMPLE_ADDRESS);
private static final String IPV4_LOOPBACK_STRING =
IPV4LOOPBACK.getHostAddress();

private static final Log LOG =
LogFactory.getLog(TestIPv6FormatCompatibility.class);

// HDFS-8078 : note that we're expecting URI-style
// (see Javadoc for java.net.URI or rfc2732)
@Test public void testDatanodeIDXferAddressAddsBrackets() {
DatanodeID ipv4localhost =
new DatanodeID(IPV4_LOOPBACK_STRING, "localhost", "no-uuid", 10, 20, 30,
40);
DatanodeID ipv6localhost =
new DatanodeID(IPV6_LOOPBACK_LONG_STRING, "localhost", "no-uuid", 10,
20, 30, 40);
DatanodeID ipv6sample =
new DatanodeID(IPV6_SAMPLE_ADDRESS, "ipv6.example.com", "no-uuid", 10,
20, 30, 40);
assertEquals("IPv6 should have brackets added", IPV6_LOOPBACK_WITH_PORT,
ipv6localhost.getXferAddr(false));
assertEquals("IPv6 should have brackets added", IPV6_SAMPLE_WITH_PORT,
ipv6sample.getXferAddr(false));
assertEquals("IPv4 should not have brackets added", IPV4_LOOPBACK_WITH_PORT,
ipv4localhost.getXferAddr(false));
}

// HDFS-8078
@Test
public void testDatanodeIDXferAddressShouldNormalizeIPv6() {
DatanodeID ipv6short =
new DatanodeID(IPV6_LOOPBACK_SHORT_STRING, "localhost", "no-uuid", 10,
20, 30, 40);
assertEquals("IPv6 should be normalized and not abbreviated",
IPV6_LOOPBACK_WITH_PORT, ipv6short.getXferAddr(false));
}

// HDFS-8078 : note that in some cases we're parsing the results of
// java.net.SocketAddress.toString()
// which doesn't produce the URI-style results, and we're splitting
// this rather than producing the combined string to be consumed.
@Test
public void testGetPeerShouldFindFullIPAddress() {
Peer ipv6SamplePeer = new MockInetPeer(IPV6SAMPLE, false);
Peer ipv4loopback = new MockInetPeer(IPV4LOOPBACK, false);
Peer ipv6loopback = new MockInetPeer(IPV6LOOPBACK, false);
assertNotNull(DataTransferSaslUtil.getPeerAddress(ipv6SamplePeer));
assertNotNull(DataTransferSaslUtil.getPeerAddress(ipv6loopback));
assertNotNull(DataTransferSaslUtil.getPeerAddress(ipv4loopback));
}

// HDFS-8078 : It looks like in some cases this could also produce URI-style
// results, so we test both.
@Test public void testGetPeerAccept() {
Peer ipv6loopbackAsURI = new MockInetPeer(IPV6LOOPBACK, true);
assertEquals("getPeer should still work with URI-style [bracket]",
IPV6_LOOPBACK_LONG_STRING,
DataTransferSaslUtil.getPeerAddress(ipv6loopbackAsURI)
.getHostAddress());
}

/**
* Mocks a Peer purely to test DataTransferSaslUtil.getPeerAddress() which
* takes a Peer and consumes getRemoteAddressString().
* All other functionality missing.
*/
private class MockInetPeer implements Peer {
private SocketAddress sa;
private boolean asURI;

MockInetPeer(InetAddress addr, boolean asURI) {
sa = new InetSocketAddress(addr, 50010);
this.asURI = asURI;
}

@Override
public ReadableByteChannel getInputStreamChannel() {
return null;
}

@Override
public void setReadTimeout(int timeoutMs) throws IOException {
}

@Override
public int getReceiveBufferSize() throws IOException {
return 0;
}

@Override
public boolean getTcpNoDelay() throws IOException {
return false;
}

@Override
public void setWriteTimeout(int timeoutMs) throws IOException {

}

@Override
public boolean isClosed() {
return false;
}

@Override
public void close() throws IOException {
}

@Override
public String getRemoteAddressString() {
return sa.toString();
}

@Override
public String getLocalAddressString() {
return null;
}

@Override
public InputStream getInputStream() throws IOException {
return null;
}

@Override
public OutputStream getOutputStream() throws IOException {
return null;
}

@Override
public boolean isLocal() {
return false;
}

@Override
public DomainSocket getDomainSocket() {
return null;
}

@Override
public boolean hasSecureChannel() {
return false;
}
}
}

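Editor's note: "URI-style" here means the RFC 2732 bracketed form the new test expects from getXferAddr(). java.net.URI itself parses that form, which is a quick way to see why brackets make "host:port" unambiguous for IPv6:

    import java.net.URI;

    public class UriBracketDemo {
      public static void main(String[] args) {
        URI u = URI.create("hdfs://[0:0:0:0:0:0:0:1]:10/path");
        System.out.println(u.getHost()); // [0:0:0:0:0:0:0:1]
        System.out.println(u.getPort()); // 10
      }
    }
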
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StopWatch;

@@ -712,19 +713,19 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {

private String[] identifyHosts(int replicationFactor,
Map<Node,NodeInfo> racksMap) {

String [] retVal = new String[replicationFactor];

List <NodeInfo> rackList = new LinkedList<NodeInfo>();

rackList.addAll(racksMap.values());

// Sort the racks based on their contribution to this split
sortInDescendingOrder(rackList);

boolean done = false;
int index = 0;

// Get the host list for all our aggregated items, sort
// them and return the top entries
for (NodeInfo ni: rackList) {

@@ -733,27 +734,27 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {

List<NodeInfo>hostList = new LinkedList<NodeInfo>();
hostList.addAll(hostSet);

// Sort the hosts in this rack based on their contribution
sortInDescendingOrder(hostList);

for (NodeInfo host: hostList) {
// Strip out the port number from the host name
retVal[index++] = host.node.getName().split(":")[0];
retVal[index++] = NetUtils.getHostFromHostPort(host.node.getName());
if (index == replicationFactor) {
done = true;
break;
}
}

if (done == true) {
break;
}
}
return retVal;
}

private String[] fakeRacks(BlockLocation[] blkLocations, int index)
throws IOException {
String[] allHosts = blkLocations[index].getHosts();
String[] allTopos = new String[allHosts.length];

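Editor's note: the replaced split(":")[0] would truncate an IPv6 literal at its first colon. The exact semantics of the NetUtils helper are defined elsewhere in this branch; the intended behavior can be sketched with Guava's HostAndPort, which the patch also adopts:

    import com.google.common.net.HostAndPort;

    public class HostFromHostPort {
      public static void main(String[] args) {
        System.out.println(HostAndPort.fromString("dn1.example.com:50010").getHost());
        // -> dn1.example.com
        System.out.println(HostAndPort.fromString("[2001:db8::1]:50010").getHost());
        // -> 2001:db8::1 (brackets stripped, literal intact)
        // split(":")[0] on the same input would have returned "[2001" - wrong.
      }
    }
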
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.util;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.net.NetUtils;

@Private
@Unstable

@@ -56,10 +57,7 @@ public class HostUtil {
public static String convertTrackerNameToHostName(String trackerName) {
// Ugly!
// Convert the trackerName to its host name
int indexOfColon = trackerName.indexOf(":");
String trackerHostName = (indexOfColon == -1) ?
trackerName :
trackerName.substring(0, indexOfColon);
String trackerHostName = NetUtils.getHostFromHostPort(trackerName);
return trackerHostName.substring("tracker_".length());
}

@@ -91,6 +91,7 @@ import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;

import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import com.google.common.net.HostAndPort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -173,10 +174,12 @@ public class HistoryClientService extends AbstractService {
.withXFSProtection(JHAdminConfig.MR_HISTORY_XFS_PREFIX)
.withAppClientProtocol(appClientProtocol)
.at(NetUtils.getHostPortString(bindAddress)).start(webApp);

String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0];
MRWebAppUtil.setJHSWebappURLWithoutScheme(conf,
connectHost + ":" + webApp.getListenerAddress().getPort());

String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf);

MRWebAppUtil.setJHSWebappURLWithoutScheme(conf, HostAndPort
.fromParts(HostAndPort.fromString(connectHost).getHost(),
webApp.getListenerAddress().getPort()).toString());
}

@Override

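Editor's note: the rewrite leans on Guava's HostAndPort round-tripping correctly for both families - brackets are added on serialization only when the host is an IPv6 literal:

    import com.google.common.net.HostAndPort;

    public class HostAndPortRoundTrip {
      public static void main(String[] args) {
        System.out.println(HostAndPort.fromParts("jhs.example.com", 19888));
        // -> jhs.example.com:19888
        System.out.println(HostAndPort.fromParts("::1", 19888));
        // -> [::1]:19888
        System.out.println(HostAndPort.fromString("[::1]:19888").getHost());
        // -> ::1
      }
    }
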
@@ -37,6 +37,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Assert;
import org.junit.Test;

import com.google.common.net.HostAndPort;

/**
* This class checks that RPCs can use specialized socket factories.
*/

@@ -89,9 +91,9 @@ public class TestMRCJCSocketFactory {
"org.apache.hadoop.ipc.DummySocketFactory");
jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
String rmAddress = jconf.get(YarnConfiguration.RM_ADDRESS);
String[] split = rmAddress.split(":");
jconf.set(YarnConfiguration.RM_ADDRESS, split[0] + ':'
+ (Integer.parseInt(split[1]) + 10));
HostAndPort hp = HostAndPort.fromString(rmAddress);
jconf.set("yarn.resourcemanager.address",
hp.getHost() + ':' + (hp.getPort() + 10));
client = new JobClient(jconf);

JobStatus[] jobs = client.jobsToComplete();

@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.util.HostUtil;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Shell;

@@ -342,7 +343,7 @@ public class ReliabilityTest extends Configured implements Tool {
LOG.info(new Date() + " Stopping a few trackers");

for (String tracker : trackerNamesList) {
String host = convertTrackerNameToHostName(tracker);
String host = HostUtil.convertTrackerNameToHostName(tracker);
LOG.info(new Date() + " Marking tracker on host: " + host);
fos.write((host + "\n").getBytes());
if (count++ >= trackerNamesList.size()/2) {

@@ -381,15 +382,6 @@ public class ReliabilityTest extends Configured implements Tool {
}
}

private String convertTrackerNameToHostName(String trackerName) {
// Convert the trackerName to it's host name
int indexOfColon = trackerName.indexOf(":");
String trackerHostName = (indexOfColon == -1) ?
trackerName :
trackerName.substring(0, indexOfColon);
return trackerHostName.substring("tracker_".length());
}

}

private class KillTaskThread extends Thread {

@ -151,6 +151,8 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.google.common.net.HostAndPort;

public class TestClientRedirect {

static {

@ -325,9 +327,9 @@ public class TestClientRedirect {
application.setYarnApplicationState(YarnApplicationState.FINISHED);
application.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
}
String[] split = AMHOSTADDRESS.split(":");
application.setHost(split[0]);
application.setRpcPort(Integer.parseInt(split[1]));
HostAndPort hp = HostAndPort.fromString(AMHOSTADDRESS);
application.setHost(hp.getHost());
application.setRpcPort(hp.getPort());
application.setUser("TestClientRedirect-user");
application.setName("N/A");
application.setQueue("N/A");

@ -834,20 +834,4 @@ public class UtilsForTests {
file.close();
return file;
}

/**
 * This formats the long tasktracker name to just the FQDN
 * @param taskTrackerLong String The long format of the tasktracker string
 * @return String The FQDN of the tasktracker
 * @throws Exception
 */
public static String getFQDNofTT (String taskTrackerLong) throws Exception {
//Getting the exact FQDN of the tasktracker from the tasktracker string.
String[] firstSplit = taskTrackerLong.split("_");
String tmpOutput = firstSplit[1];
String[] secondSplit = tmpOutput.split(":");
String tmpTaskTracker = secondSplit[0];
return tmpTaskTracker;
}

}

@ -44,6 +44,7 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.eclipse.jetty.util.ajax.JSON;
import com.google.common.net.HostAndPort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@ -193,8 +194,8 @@ public class MiniHadoopClusterManager {
map.put("namenode_port", dfs.getNameNodePort());
}
if (mr != null) {
map.put("resourcemanager_port", mr.getConfig().get(
YarnConfiguration.RM_ADDRESS).split(":")[1]);
map.put("resourcemanager_port", HostAndPort.fromString(
mr.getConfig().get(YarnConfiguration.RM_ADDRESS)).getPort());
}
FileWriter fw = new FileWriter(new File(writeDetails));
fw.write(new JSON().toJSON(map));

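One side effect worth noting here: split(":")[1] produced a String, while HostAndPort.getPort() returns an int, so the resourcemanager_port entry in the written JSON becomes a bare number rather than a quoted string. A short sketch of the difference (the address value is illustrative):

import com.google.common.net.HostAndPort;

public class PortTypeChange {
  public static void main(String[] args) {
    String rmAddress = "rm.example.com:8032";
    String before = rmAddress.split(":")[1];                  // "8032" as a String
    int after = HostAndPort.fromString(rmAddress).getPort();  // 8032 as an int
    // A JSON writer renders these as "8032" vs 8032 respectively.
    System.out.println(before + " / " + after);
  }
}
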
@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.util.Records;
import com.google.common.net.HostAndPort;

/**
 * <p><code>NodeId</code> is the unique identifier for a node.</p>

@ -116,17 +117,18 @@ public abstract class NodeId implements Comparable<NodeId> {
@Public
@Stable
public static NodeId fromString(String nodeIdStr) {
String[] parts = nodeIdStr.split(":");
if (parts.length != 2) {
throw new IllegalArgumentException("Invalid NodeId [" + nodeIdStr
+ "]. Expected host:port");
HostAndPort hp = HostAndPort.fromString(nodeIdStr);
if (!hp.hasPort()) {
throw new IllegalArgumentException(
"Invalid NodeId [" + nodeIdStr + "]. Expected host:port");
}
try {
NodeId nodeId =
NodeId.newInstance(parts[0].trim(), Integer.parseInt(parts[1]));
String hostPortStr = hp.toString();
String host = hostPortStr.substring(0, hostPortStr.lastIndexOf(":"));
NodeId nodeId = NodeId.newInstance(host, hp.getPort());
return nodeId;
} catch (NumberFormatException e) {
throw new IllegalArgumentException("Invalid port: " + parts[1], e);
throw new IllegalArgumentException("Invalid port: " + hp.getPort(), e);
}
}

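Note the indirection in the new fromString: hp.getHost() would strip the brackets from an IPv6 literal, so the code instead re-renders with hp.toString() and cuts at the last colon, keeping the bracketed form as the NodeId host (the TestConverterUtils cases later in this diff assert exactly that). A sketch of the two behaviors, assuming Guava's HostAndPort semantics:

import com.google.common.net.HostAndPort;

public class BracketPreservingHost {
  public static void main(String[] args) {
    HostAndPort hp = HostAndPort.fromString("[2401:db00:20:a01e:face:0:5:0]:10");

    // getHost() strips the brackets:
    System.out.println(hp.getHost()); // 2401:db00:20:a01e:face:0:5:0

    // Re-rendering and cutting at the last ":" keeps them:
    String hostPortStr = hp.toString(); // [2401:db00:20:a01e:face:0:5:0]:10
    String host = hostPortStr.substring(0, hostPortStr.lastIndexOf(":"));
    System.out.println(host); // [2401:db00:20:a01e:face:0:5:0]
  }
}
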
@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.factories.RecordFactory;

import com.google.common.net.HostAndPort;

/**
 * This class contains a set of utilities which help converting data structures

@ -114,11 +114,11 @@ public class ConverterUtils {

@Private
@InterfaceStability.Unstable
public static NodeId toNodeIdWithDefaultPort(String nodeIdStr) {
if (nodeIdStr.indexOf(":") < 0) {
return NodeId.fromString(nodeIdStr + ":0");
}
return NodeId.fromString(nodeIdStr);
public static NodeId toNodeIdWithDefaultPort(
String nodeIdStr) {
HostAndPort hp = HostAndPort.fromString(nodeIdStr);
hp = hp.withDefaultPort(0);
return toNodeId(hp.toString());
}

/*

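withDefaultPort only fills in a port when the parsed string had none, which is what lets toNodeIdWithDefaultPort drop its explicit indexOf(":") check. A brief sketch, assuming Guava's documented behavior:

import com.google.common.net.HostAndPort;

public class DefaultPortDemo {
  public static void main(String[] args) {
    // No port in the input: the default of 0 is applied.
    System.out.println(HostAndPort.fromString("node").withDefaultPort(0));    // node:0

    // Port present: the default is ignored.
    System.out.println(HostAndPort.fromString("node:10").withDefaultPort(0)); // node:10

    // Bare bracketed IPv6 also gets the default, re-rendered with brackets.
    System.out.println(HostAndPort.fromString("[::1]").withDefaultPort(0));   // [::1]:0
  }
}
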
@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
import com.google.common.net.HostAndPort;

import javax.servlet.http.HttpServletRequest;

@ -64,15 +65,13 @@ public class WebAppUtils {

public static void setRMWebAppPort(Configuration conf, int port) {
String hostname = getRMWebAppURLWithoutScheme(conf);
hostname =
(hostname.contains(":")) ? hostname.substring(0, hostname.indexOf(":"))
: hostname;
setRMWebAppHostnameAndPort(conf, hostname, port);
HostAndPort hp = HostAndPort.fromString(hostname);
setRMWebAppHostnameAndPort(conf, hp.getHost(), port);
}

public static void setRMWebAppHostnameAndPort(Configuration conf,
String hostname, int port) {
String resolvedAddress = hostname + ":" + port;
String resolvedAddress = HostAndPort.fromParts(hostname, port).toString();
if (YarnConfiguration.useHttps(conf)) {
conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, resolvedAddress);
} else {

@ -82,12 +81,11 @@ public class WebAppUtils {

public static void setNMWebAppHostNameAndPort(Configuration conf,
String hostName, int port) {
String hostPortString = HostAndPort.fromParts(hostName, port).toString();
if (YarnConfiguration.useHttps(conf)) {
conf.set(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS,
hostName + ":" + port);
conf.set(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS, hostPortString);
} else {
conf.set(YarnConfiguration.NM_WEBAPP_ADDRESS,
hostName + ":" + port);
conf.set(YarnConfiguration.NM_WEBAPP_ADDRESS, hostPortString);
}
}

@ -325,7 +323,8 @@ public class WebAppUtils {
String host = conf.getTrimmed(hostProperty);
if (host != null && !host.isEmpty()) {
if (webAppURLWithoutScheme.contains(":")) {
webAppURLWithoutScheme = host + ":" + webAppURLWithoutScheme.split(":")[1];
String[] splits = webAppURLWithoutScheme.split(":");
webAppURLWithoutScheme = host + ":" + splits[splits.length - 1];
}
else {
throw new YarnRuntimeException("webAppURLWithoutScheme must include port specification but doesn't: " +

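The last WebAppUtils hunk switches from split(":")[1] to the final split token, because for a "[v6]:port" URL the port is the last colon-delimited segment, not the second one. Roughly (the example input is hypothetical):

public class LastTokenPort {
  public static void main(String[] args) {
    String webAppURLWithoutScheme = "[2401:db00:20:a01e:face:0:5:0]:8088";
    String host = "rm.example.com"; // the configured override host

    String[] splits = webAppURLWithoutScheme.split(":");
    // splits[1] would be "db00"; the port is always the last token.
    System.out.println(host + ":" + splits[splits.length - 1]); // rm.example.com:8088
  }
}
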
@ -18,6 +18,8 @@

package org.apache.hadoop.yarn.conf;

import com.google.common.net.HostAndPort;
import org.apache.hadoop.net.NetUtils;
import org.junit.Assert;

import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

@ -74,7 +76,7 @@ public class TestYarnConfiguration {
conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf);
String[] parts = rmWebUrl.split(":");
Assert.assertEquals("RM Web URL Port is incrrect", 24543,
Assert.assertEquals("RM Web URL Port is incorrect", 24543,
Integer.parseInt(parts[parts.length - 1]));
Assert.assertNotSame(
"RM Web Url not resolved correctly. Should not be rmtesting",

@ -112,10 +114,9 @@ public class TestYarnConfiguration {
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
assertEquals(new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
resourceTrackerAddress);

//with address

@ -125,10 +126,8 @@ public class TestYarnConfiguration {
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"10.0.0.1",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
assertEquals(new InetSocketAddress("10.0.0.1",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
resourceTrackerAddress);

//address and socket

@ -139,9 +138,23 @@ public class TestYarnConfiguration {
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"10.0.0.2",
5001),
new InetSocketAddress(
"10.0.0.2",
5001),
resourceTrackerAddress);

// IPv6 address and socket
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
"[2401:db00:20:a01e:face:0:5:0]:5001");
resourceTrackerAddress = conf.getSocketAddr(
YarnConfiguration.RM_BIND_HOST,
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"2401:db00:20:a01e:face:0:5:0",
5001),
resourceTrackerAddress);

//bind host only

@ -152,10 +165,8 @@ public class TestYarnConfiguration {
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"10.0.0.3",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
assertEquals(new InetSocketAddress("10.0.0.3",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
resourceTrackerAddress);

//bind host and address no port

@ -166,10 +177,8 @@ public class TestYarnConfiguration {
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"0.0.0.0",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
assertEquals(new InetSocketAddress("0.0.0.0",
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
resourceTrackerAddress);

//bind host and address with port

@ -180,10 +189,7 @@ public class TestYarnConfiguration {
YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
assertEquals(
new InetSocketAddress(
"0.0.0.0",
5003),
assertEquals(new InetSocketAddress("0.0.0.0", 5003),
resourceTrackerAddress);

}

@ -197,9 +203,8 @@ public class TestYarnConfiguration {
//no override, old behavior. Won't work on a host named "yo.yo.yo"
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
Integer.parseInt(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
serverAddress = newInetSocketAddressFromHostPort(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS);

resourceTrackerConnectAddress = conf.updateConnectAddr(
YarnConfiguration.RM_BIND_HOST,

@ -207,15 +212,15 @@ public class TestYarnConfiguration {
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
serverAddress);

assertFalse(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
assertFalse(NetUtils.getSocketAddressString(resourceTrackerConnectAddress)
.startsWith("yo.yo.yo"));

//cause override with address
conf = new YarnConfiguration();
conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
Integer.parseInt(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
serverAddress = newInetSocketAddressFromHostPort(
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS);

resourceTrackerConnectAddress = conf.updateConnectAddr(
YarnConfiguration.RM_BIND_HOST,

@ -223,7 +228,8 @@ public class TestYarnConfiguration {
YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
serverAddress);

assertTrue(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
assertTrue(NetUtils.getSocketAddressString(resourceTrackerConnectAddress)
.startsWith("yo.yo.yo"));

//tests updateConnectAddr won't add suffix to NM service address configurations
conf = new YarnConfiguration();

@ -232,9 +238,8 @@ public class TestYarnConfiguration {
conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
conf.set(YarnConfiguration.RM_HA_ID, "rm1");

serverAddress = new InetSocketAddress(
YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[0],
Integer.parseInt(YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[1]));
serverAddress = newInetSocketAddressFromHostPort(
YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS);

InetSocketAddress localizerAddress = conf.updateConnectAddr(
YarnConfiguration.NM_BIND_HOST,

@ -242,8 +247,15 @@ public class TestYarnConfiguration {
YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
serverAddress);

assertTrue(localizerAddress.toString().startsWith("yo.yo.yo"));
assertTrue(NetUtils.getSocketAddressString(localizerAddress)
.startsWith("yo.yo.yo"));
assertNull(conf.get(
HAUtil.addSuffix(YarnConfiguration.NM_LOCALIZER_ADDRESS, "rm1")));
}

private InetSocketAddress newInetSocketAddressFromHostPort(
String hostPort) {
HostAndPort hp = HostAndPort.fromString(hostPort);
return new InetSocketAddress(hp.getHost(), hp.getPort());
}
}

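The new newInetSocketAddressFromHostPort helper replaces the repeated split(":") pairs, and the assertions now go through NetUtils.getSocketAddressString (a helper this branch appears to introduce) rather than InetSocketAddress.toString(), whose rendering of IPv6 addresses is awkward to match against a host prefix. A sketch of the helper in isolation, assuming getHost() hands InetSocketAddress an unbracketed literal it can handle:

import java.net.InetSocketAddress;
import com.google.common.net.HostAndPort;

public class HostPortToSocketAddress {
  static InetSocketAddress newInetSocketAddressFromHostPort(String hostPort) {
    HostAndPort hp = HostAndPort.fromString(hostPort);
    // getHost() yields "0.0.0.0" or "2401:db00:..." without brackets,
    // which is the form InetSocketAddress expects.
    return new InetSocketAddress(hp.getHost(), hp.getPort());
  }

  public static void main(String[] args) {
    System.out.println(newInetSocketAddressFromHostPort("0.0.0.0:8031"));
    System.out.println(
        newInetSocketAddressFromHostPort("[2401:db00:20:a01e:face:0:5:0]:8031"));
  }
}
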
@ -31,7 +31,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
import org.junit.Test;

public class TestConverterUtils {

@Test
public void testConvertUrlWithNoPort() throws URISyntaxException {
Path expectedPath = new Path("hdfs://foo.com");

@ -92,14 +92,24 @@ public class TestConverterUtils {
@Test
public void testNodeIdWithDefaultPort() throws URISyntaxException {
NodeId nid;

nid = ConverterUtils.toNodeIdWithDefaultPort("node:10");
assertThat(nid.getPort()).isEqualTo(10);
assertThat(nid.getHost()).isEqualTo("node");

nid = ConverterUtils.toNodeIdWithDefaultPort("node");
assertThat(nid.getPort()).isEqualTo(0);
assertThat(nid.getHost()).isEqualTo("node");

nid = ConverterUtils
.toNodeIdWithDefaultPort("[2401:db00:20:a01e:face:0:5:0]:10");
assertEquals(nid.getPort(), 10);
assertEquals(nid.getHost(), "[2401:db00:20:a01e:face:0:5:0]");

nid = ConverterUtils
.toNodeIdWithDefaultPort("[2401:db00:20:a01e:face:0:5:0]");
assertEquals(nid.getPort(), 0);
assertEquals(nid.getHost(), "[2401:db00:20:a01e:face:0:5:0]");
}

@Test(expected = IllegalArgumentException.class)

@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
import java.net.Socket;

import org.apache.hadoop.net.ServerSocketUtil;
import com.google.common.net.HostAndPort;
import org.junit.Assert;

import org.apache.hadoop.yarn.lib.ZKClient;

@ -86,8 +87,9 @@ public class TestZKClient {
long start = System.currentTimeMillis();
while (true) {
try {
String host = hp.split(":")[0];
int port = Integer.parseInt(hp.split(":")[1]);
HostAndPort hap = HostAndPort.fromString(hp);
String host = hap.getHost();
int port = hap.getPort();
send4LetterWord(host, port, "stat");
} catch (IOException e) {
return true;

@ -110,8 +112,9 @@ public class TestZKClient {
long start = System.currentTimeMillis();
while (true) {
try {
String host = hp.split(":")[0];
int port = Integer.parseInt(hp.split(":")[1]);
HostAndPort hap = HostAndPort.fromString(hp);
String host = hap.getHost();
int port = hap.getPort();
// if there are multiple hostports, just take the first one
String result = send4LetterWord(host, port, "stat");
if (result.startsWith("Zookeeper version:")) {

@ -151,14 +154,15 @@ public class TestZKClient {
}
File dataDir = createTmpDir(BASETEST);
zks = new ZooKeeperServer(dataDir, dataDir, 3000);
final int PORT = Integer.parseInt(hostPort.split(":")[1]);
HostAndPort hp = HostAndPort.fromString(hostPort);
final int port = hp.getPort();
if (factory == null) {
factory = new NIOServerCnxnFactory();
factory.configure(new InetSocketAddress(PORT), maxCnxns);
factory.configure(new InetSocketAddress(port), maxCnxns);
}
factory.startup(zks);
Assert.assertTrue("waiting for server up",
waitForServerUp("127.0.0.1:" + PORT,
waitForServerUp("127.0.0.1:" + port,
CONNECTION_TIMEOUT));
}

@ -172,10 +176,11 @@ public class TestZKClient {
zkDb.close();
} catch (IOException ie) {
}
final int PORT = Integer.parseInt(hostPort.split(":")[1]);
HostAndPort hp = HostAndPort.fromString(hostPort);
final int port = hp.getPort();

Assert.assertTrue("waiting for server down",
waitForServerDown("127.0.0.1:" + PORT,
waitForServerDown("127.0.0.1:" + port,
CONNECTION_TIMEOUT));
}

@ -185,6 +185,7 @@ import java.util.Set;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
import com.google.common.net.HostAndPort;

import static org.apache.hadoop.service.Service.STATE.STARTED;

@ -645,7 +646,7 @@ public class ContainerManagerImpl extends CompositeService implements
//hostname found when querying for our hostname with the specified
//address, combine the specified address with the actual port listened
//on by the server
hostOverride = nmAddress.split(":")[0];
hostOverride = HostAndPort.fromString(nmAddress).getHost();
}

// setup node ID

@ -137,6 +137,7 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
import org.eclipse.jetty.webapp.WebAppContext;

import com.google.common.net.HostAndPort;
import java.io.IOException;
import java.io.InputStream;
import java.io.PrintStream;

@ -1399,8 +1400,8 @@ public class ResourceManager extends CompositeService
builder.withAttribute(WebAppProxy.PROXY_CA,
rmContext.getProxyCAManager().getProxyCA());
builder.withAttribute(WebAppProxy.FETCHER_ATTRIBUTE, fetcher);
String[] proxyParts = proxyHostAndPort.split(":");
builder.withAttribute(WebAppProxy.PROXY_HOST_ATTRIBUTE, proxyParts[0]);
builder.withAttribute(WebAppProxy.PROXY_HOST_ATTRIBUTE,
HostAndPort.fromString(proxyHostAndPort).getHost());
}

WebAppContext uiWebAppContext = null;

@ -30,6 +30,7 @@ import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

import com.google.common.net.HostAndPort;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;

@ -105,8 +106,10 @@ public class MockNM {
this.capability = capability;
this.resourceTracker = resourceTracker;
this.version = version;
String[] splits = nodeIdStr.split(":");
nodeId = BuilderUtils.newNodeId(splits[0], Integer.parseInt(splits[1]));
HostAndPort hostAndPort = HostAndPort.fromString(nodeIdStr);
String hostPortStr = hostAndPort.toString();
String host = hostPortStr.substring(0, hostPortStr.lastIndexOf(":"));
nodeId = BuilderUtils.newNodeId(host, hostAndPort.getPort());
}

public MockNM(String nodeIdStr, Resource capability,

@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.webproxy;
import java.io.IOException;
import java.net.URI;

import com.google.common.net.HostAndPort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer2;
import org.apache.hadoop.security.authorize.AccessControlList;

@ -57,7 +58,8 @@ public class WebAppProxy extends AbstractService {

@Override
protected void serviceInit(Configuration conf) throws Exception {
String auth = conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
String auth = conf.get(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
if (auth == null || "simple".equals(auth)) {
isSecurityEnabled = false;
} else if ("kerberos".equals(auth)) {

@ -68,8 +70,7 @@ public class WebAppProxy extends AbstractService {
" of " + auth);
}
String proxy = WebAppUtils.getProxyHostAndPort(conf);
String[] proxyParts = proxy.split(":");
proxyHost = proxyParts[0];
proxyHost = HostAndPort.fromString(proxy).getHost();

fetcher = new AppReportFetcher(conf);
bindAddress = conf.get(YarnConfiguration.PROXY_ADDRESS);

@ -19,6 +19,7 @@
package org.apache.hadoop.yarn.server.webproxy.amfilter;

import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
import com.google.common.net.HostAndPort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;

@ -38,14 +39,15 @@ public class AmFilterInitializer extends FilterInitializer {
private static final String FILTER_NAME = "AM_PROXY_FILTER";
private static final String FILTER_CLASS = AmIpFilter.class.getCanonicalName();
public static final String RM_HA_URLS = "RM_HA_URLS";

@Override
public void initFilter(FilterContainer container, Configuration conf) {
Map<String, String> params = new HashMap<>();
List<String> proxies = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
StringBuilder sb = new StringBuilder();
for (String proxy : proxies) {
sb.append(proxy.split(":")[0]).append(AmIpFilter.PROXY_HOSTS_DELIMITER);
sb.append(HostAndPort.fromString(proxy).getHost())
.append(AmIpFilter.PROXY_HOSTS_DELIMITER);
}
sb.setLength(sb.length() - 1);
params.put(AmIpFilter.PROXY_HOSTS, sb.toString());

@ -50,6 +50,7 @@ import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;

import com.google.common.net.HostAndPort;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.http.HttpServer2;

@ -589,8 +590,7 @@ public class TestWebAppProxyServlet {
proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, Boolean.TRUE);

String proxy = WebAppUtils.getProxyHostAndPort(conf);
String[] proxyParts = proxy.split(":");
String proxyHost = proxyParts[0];
String proxyHost = HostAndPort.fromString(proxy).getHost();

proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
proxyServer.start();