Compare commits
11 Commits
trunk...HADOOP-178

Author | SHA1 | Date
---|---|---
Renukaprasad C | f5e9b6861a |
Renukaprasad C | c0f0b33e40 |
Brahma Reddy Battula | 7118db5ee3 |
Brahma Reddy Battula | c0c70e0833 |
Brahma Reddy Battula | 3133386ac4 |
Brahma Reddy Battula | 2c9b22f15c |
Brahma Reddy Battula | eaad653180 |
Brahma Reddy Battula | b30674140b |
Brahma Reddy Battula | 809cca765f |
Brahma Reddy Battula | ddecfe1524 |
Brahma Reddy Battula | f293a2ff71 |
```diff
@@ -619,7 +619,12 @@ function hadoop_bootstrap
   export HADOOP_OS_TYPE=${HADOOP_OS_TYPE:-$(uname -s)}

   # defaults
-  export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+  # shellcheck disable=SC2154
+  if [[ "${HADOOP_ALLOW_IPV6}" -ne "yes" ]]; then
+    export HADOOP_OPTS=${HADOOP_OPTS:-"-Djava.net.preferIPv4Stack=true"}
+  else
+    export HADOOP_OPTS=${HADOOP_OPTS:-""}
+  fi
   hadoop_debug "Initial HADOOP_OPTS=${HADOOP_OPTS}"
 }
```
```diff
@@ -85,8 +85,7 @@
 # Kerberos security.
 # export HADOOP_JAAS_DEBUG=true

-# Extra Java runtime options for all Hadoop commands. We don't support
-# IPv6 yet/still, so by default the preference is set to IPv4.
+# Extra Java runtime options for all Hadoop commands.
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true"
 # For Kerberos debugging, an extended option set logs more information
 # export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true -Dsun.security.krb5.debug=true -Dsun.security.spnego.debug"
```
```diff
@@ -2562,7 +2562,7 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
       return updateConnectAddr(addressProperty, addr);
     }

-    final String connectHost = connectHostPort.split(":")[0];
+    final String connectHost = NetUtils.getHostFromHostPort(connectHostPort);
     // Create connect address using client address hostname and server port.
     return updateConnectAddr(addressProperty, NetUtils.createSocketAddrForHost(
         connectHost, addr.getPort()));
```
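Reviewer note on the `Configuration` change above: the old `split(":")[0]` idiom truncates any IPv6 literal, because the address itself contains colons. A minimal sketch of the difference, using plain (unshaded) Guava `HostAndPort` rather than the Hadoop thirdparty copy the patch uses; the host:port values below are invented:

```java
import com.google.common.net.HostAndPort;

public class HostPortDemo {
  public static void main(String[] args) {
    String v4 = "nn1.example.com:8020";  // hypothetical host:port
    String v6 = "[2001:db8::1]:8020";    // bracketed IPv6 literal

    // Naive split: fine for IPv4 and hostnames, wrong for IPv6.
    System.out.println(v4.split(":")[0]); // nn1.example.com
    System.out.println(v6.split(":")[0]); // "[2001" -- truncated

    // HostAndPort understands bracketed IPv6 literals.
    System.out.println(HostAndPort.fromString(v4).getHost()); // nn1.example.com
    System.out.println(HostAndPort.fromString(v6).getHost()); // 2001:db8::1
  }
}
```

`NetUtils.getHostFromHostPort`, introduced later in this compare, is a thin wrapper over exactly this `HostAndPort.fromString(...).getHost()` call.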
```diff
@@ -82,6 +82,7 @@ import com.fasterxml.jackson.databind.ObjectMapper;
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.apache.hadoop.thirdparty.com.google.common.base.Strings;
+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;

 import static org.apache.hadoop.util.KMSUtil.checkNotEmpty;
 import static org.apache.hadoop.util.KMSUtil.checkNotNull;
@@ -290,16 +291,20 @@ public class KMSClientProvider extends KeyProvider implements CryptoExtension,
       // In the current scheme, all hosts have to run on the same port
       int port = -1;
       String hostsPart = authority;

       if (authority.contains(":")) {
-        String[] t = authority.split(":");
         try {
-          port = Integer.parseInt(t[1]);
-        } catch (Exception e) {
+          HostAndPort hp = HostAndPort.fromString(hostsPart);
+          if (hp.hasPort()) {
+            port = hp.getPort();
+            hostsPart = hp.getHost();
+          }
+        } catch (IllegalArgumentException e) {
           throw new IOException(
               "Could not parse port in kms uri [" + origUrl + "]");
         }
-        hostsPart = t[0];
       }

       KMSClientProvider[] providers =
           createProviders(conf, origUrl, port, hostsPart);
       return new LoadBalancingKMSClientProvider(providerUri, providers, conf);
```
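Reviewer note: the rewritten KMS authority parsing relies on `HostAndPort.hasPort()` to keep the `-1` default when no port is given. A hedged sketch of the cases it now covers (authority strings invented; plain Guava shown for illustration):

```java
import com.google.common.net.HostAndPort;

public class KmsAuthorityDemo {
  public static void main(String[] args) {
    // Single host with a port, as before.
    HostAndPort hp = HostAndPort.fromString("kms01.example.com:9600");
    System.out.println(hp.hasPort() + " " + hp.getHost() + " " + hp.getPort());
    // true kms01.example.com 9600

    // Bare host: hasPort() is false, so port keeps its -1 default.
    System.out.println(HostAndPort.fromString("kms01.example.com").hasPort()); // false

    // Bracketed IPv6 authority now parses instead of misfiring on split(":").
    hp = HostAndPort.fromString("[2001:db8::15]:9600");
    System.out.println(hp.getHost() + " " + hp.getPort()); // 2001:db8::15 9600

    // Semicolon-separated multi-host form rides along in the host part.
    hp = HostAndPort.fromString("kms01.example.com;kms02.example.com:9600");
    System.out.println(hp.getHost()); // kms01.example.com;kms02.example.com

    // A non-numeric port throws IllegalArgumentException,
    // which the patch maps to IOException.
    try {
      HostAndPort.fromString("kms01:abc");
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```

The multi-host case is left for the existing `createProviders(...)` path to split, as before.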
```diff
@@ -199,8 +199,16 @@ public class Path
     int start = 0;

     // parse uri scheme, if any
-    int colon = pathString.indexOf(':');
+    int colon = -1;
     int slash = pathString.indexOf('/');
+    if (StringUtils.countMatches(pathString, ":") > 2) {
+      // In case of IPv6 address, we should be able to parse the scheme
+      // correctly (This will ensure to parse path with & without scheme
+      // correctly in IPv6).
+      colon = pathString.indexOf(":/");
+    } else {
+      colon = pathString.indexOf(':');
+    }
     if ((colon != -1) &&
         ((slash == -1) || (colon < slash))) { // has a scheme
       scheme = pathString.substring(0, colon);
```
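Reviewer note: the `Path` guard keys off the colon count. A small sketch (commons-lang3 `countMatches`, invented path strings) of why the first `':'` is only trustworthy when at most two colons are present:

```java
import org.apache.commons.lang3.StringUtils;

public class PathSchemeDemo {
  public static void main(String[] args) {
    String plain = "hdfs://nn1.example.com:8020/user/foo";
    String v6 = "hdfs://[2001:db8::1]:8020/user/foo";

    System.out.println(StringUtils.countMatches(plain, ":")); // 2 -> use indexOf(':')
    System.out.println(StringUtils.countMatches(v6, ":"));    // 5 -> use indexOf(":/")

    // The guard matters most for *schemeless* strings, where the first ':'
    // belongs to the address itself and would otherwise look like a scheme.
    String schemeless = "[2001:db8::1]:8020/user/foo";
    System.out.println(schemeless.indexOf(':'));  // 5  -- inside the literal
    System.out.println(schemeless.indexOf(":/")); // -1 -- no scheme, as desired
  }
}
```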
```diff
@@ -499,10 +499,11 @@ public class Client implements AutoCloseable {
       boolean trySasl = UserGroupInformation.isSecurityEnabled() ||
           (ticket != null && !ticket.getTokens().isEmpty());
       this.authProtocol = trySasl ? AuthProtocol.SASL : AuthProtocol.NONE;

-      this.setName("IPC Client (" + socketFactory.hashCode() +") connection to " +
-          server.toString() +
-          " from " + ((ticket==null)?"an unknown user":ticket.getUserName()));
+      this.setName(
+          "IPC Client (" + socketFactory.hashCode() + ") connection to "
+              + NetUtils.getSocketAddressString(server) + " from " + ((ticket
+              == null) ? "an unknown user" : ticket.getUserName()));
       this.setDaemon(true);
     }

@@ -636,8 +637,9 @@ public class Client implements AutoCloseable {
           server.getHostName(), server.getPort());

       if (!server.equals(currentAddr)) {
-        LOG.warn("Address change detected. Old: " + server.toString() +
-            " New: " + currentAddr.toString());
+        LOG.warn("Address change detected. Old: " + NetUtils
+            .getSocketAddressString(server) + " New: " + NetUtils
+            .getSocketAddressString(currentAddr));
         server = currentAddr;
         UserGroupInformation ticket = remoteId.getTicket();
         this.setName("IPC Client (" + socketFactory.hashCode()
@@ -1835,7 +1837,7 @@ public class Client implements AutoCloseable {

     @Override
     public String toString() {
-      return address.toString();
+      return NetUtils.getSocketAddressString(address);
     }
   }
```
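Reviewer note: these `Client` call sites swap `InetSocketAddress.toString()` for the new `NetUtils.getSocketAddressString()` (added in the NetUtils hunks below). The motivation, sketched; exact `toString()` output varies by JDK release, and newer JDKs bracket IPv6 themselves:

```java
import java.net.InetSocketAddress;

public class AddrToStringDemo {
  public static void main(String[] args) {
    InetSocketAddress v6 = new InetSocketAddress("::1", 8020);
    // On older JDKs this prints something like
    //   ::1/0:0:0:0:0:0:0:1:8020
    // where only the *last* ':' separates the port -- hard to read for IPv6.
    System.out.println(v6.toString());

    // The patch's NetUtils.getSocketAddressString keeps the hostname/IP:port
    // shape but brackets the literal:
    //   ::1/[0:0:0:0:0:0:0:1]:8020
  }
}
```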
```diff
@@ -26,6 +26,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.net.InetAddress;
+import java.net.Inet6Address;
 import java.net.NetworkInterface;
 import java.net.SocketException;
 import java.net.UnknownHostException;
@@ -74,22 +75,23 @@ public class DNS {
    * @throws NamingException If a NamingException is encountered
    */
   public static String reverseDns(InetAddress hostIp, @Nullable String ns)
       throws NamingException {
-    //
-    // Builds the reverse IP lookup form
-    // This is formed by reversing the IP numbers and appending in-addr.arpa
-    //
-    String[] parts = hostIp.getHostAddress().split("\\.");
-    String reverseIP = parts[3] + "." + parts[2] + "." + parts[1] + "."
-        + parts[0] + ".in-addr.arpa";
+    String dnsQueryAddress;
+    if (hostIp instanceof Inet6Address) {
+      dnsQueryAddress = getIPv6DnsAddr((Inet6Address) hostIp, ns);
+    } else {
+      dnsQueryAddress = getIPv4DnsAddr(hostIp, ns);
+    }
+    LOG.info("Querying using DNS address: " + dnsQueryAddress);

     DirContext ictx = new InitialDirContext();
     Attributes attribute;
     try {
-      attribute = ictx.getAttributes("dns://" // Use "dns:///" if the default
-          + ((ns == null) ? "" : ns) +
-          // nameserver is to be used
-          "/" + reverseIP, new String[] { "PTR" });
+      // Use "dns:///" if the default
+      // nameserver is to be used
+      attribute = ictx.getAttributes(
+          "dns://" + ((ns == null) ? "" : ns) + "/" + dnsQueryAddress,
+          new String[] {"PTR"});
     } finally {
       ictx.close();
     }
@@ -102,18 +104,53 @@ public class DNS {
     return hostname;
   }

+  private static String getIPv4DnsAddr(InetAddress hostIp, @Nullable String ns)
+      throws NamingException {
+    String ipString = hostIp.getHostAddress();
+    LOG.info("Doing reverse DNS lookup for IPv4 address: " + ipString);
+    String[] parts = ipString.split("\\.");
+    if (parts.length != 4) {
+      throw new NamingException("Invalid IPv4 address " + ipString);
+    }
+
+    return parts[3] + "." + parts[2] + "." + parts[1] + "." + parts[0]
+        + ".in-addr.arpa";
+  }
+
+  @VisibleForTesting
+  public static String getIPv6DnsAddr(Inet6Address hostIp, @Nullable String ns)
+      throws NamingException {
+    LOG.info("Doing reverse DNS lookup for IPv6 address: " +
+        hostIp.getHostAddress());
+
+    // bytes need to be converted to hex string and reversed to get IPv6
+    // reverse resolution address
+    byte[] bytes = hostIp.getAddress();
+    StringBuilder sb = new StringBuilder();
+    for(int pos = bytes.length - 1; pos >= 0; pos--) {
+      byte b = bytes[pos];
+      String hexStr = String.format("%02x", b);
+      sb.append(hexStr.charAt(1));
+      sb.append(".");
+      sb.append(hexStr.charAt(0));
+      sb.append(".");
+    }
+    sb.append("ip6.arpa");
+    return sb.toString();
+  }
+
   /**
    * @return NetworkInterface for the given subinterface name (eg eth0:0)
    *     or null if no interface with the given name can be found
    */
   private static NetworkInterface getSubinterface(String strInterface)
       throws SocketException {
     Enumeration<NetworkInterface> nifs =
         NetworkInterface.getNetworkInterfaces();

     while (nifs.hasMoreElements()) {
       Enumeration<NetworkInterface> subNifs =
           nifs.nextElement().getSubInterfaces();

       while (subNifs.hasMoreElements()) {
         NetworkInterface nif = subNifs.nextElement();
```
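Reviewer note: `getIPv6DnsAddr` builds the RFC 3596 reverse name, the 16 address bytes rendered as 32 hex nibbles, low nibble first, in reverse byte order, under `ip6.arpa`. A standalone restatement of that loop (illustrative only):

```java
import java.net.InetAddress;

public class Ip6ArpaDemo {
  static String toIp6Arpa(byte[] bytes) {
    StringBuilder sb = new StringBuilder();
    for (int pos = bytes.length - 1; pos >= 0; pos--) {
      String hex = String.format("%02x", bytes[pos]);
      sb.append(hex.charAt(1)).append('.'); // low nibble first
      sb.append(hex.charAt(0)).append('.'); // then high nibble
    }
    return sb.append("ip6.arpa").toString();
  }

  public static void main(String[] args) throws Exception {
    InetAddress lo = InetAddress.getByName("::1");
    // 1.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa
    System.out.println(toIp6Arpa(lo.getAddress()));
  }
}
```

The `testIPv6ReverseDNSAddress` case added further down exercises the same expectation against `::` and a sample link-local address.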
```diff
@@ -40,12 +40,12 @@ import java.nio.channels.SocketChannel;
 import java.nio.channels.UnresolvedAddressException;
 import java.util.Map.Entry;
 import java.util.concurrent.TimeUnit;
-import java.util.regex.Pattern;
 import java.util.*;
 import java.util.concurrent.ConcurrentHashMap;

 import javax.net.SocketFactory;

+import org.apache.commons.lang3.StringUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.thirdparty.com.google.common.cache.Cache;
 import org.apache.hadoop.thirdparty.com.google.common.cache.CacheBuilder;
@@ -61,6 +61,11 @@ import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.ReflectionUtils;

+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;
+import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
+import org.apache.http.conn.util.InetAddressUtils;
+import java.net.*;
+
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
```
```diff
@@ -70,7 +75,7 @@ import org.slf4j.LoggerFactory;
 public class NetUtils {
   private static final Logger LOG = LoggerFactory.getLogger(NetUtils.class);

   private static Map<String, String> hostToResolved =
       new HashMap<String, String>();
   /** text to point users elsewhere: {@value} */
   private static final String FOR_MORE_DETAILS_SEE
```
```diff
@@ -219,6 +224,22 @@ public class NetUtils {
     }
     target = target.trim();
     boolean hasScheme = target.contains("://");
+    if (StringUtils.countMatches(target, ":") > 2) {
+      // if scheme exists in the target
+      // for example : https://ffff:ffff:ffff:ffff::1:XXXXX
+      // we have to form https://[ffff:ffff:ffff:ffff::1]:XXXXX
+      if (hasScheme) {
+        int i = target.lastIndexOf("/");
+        String scheme = target.substring(0, i + 1);
+        String ipAddrWithPort = target.substring(i + 1);
+        target = scheme + normalizeV6Address(ipAddrWithPort);
+      } else {
+        // if scheme does not exists in the target
+        // for example : ffff:ffff:ffff:ffff::1:XXXXX
+        // we have to form [ffff:ffff:ffff:ffff::1]:XXXXX
+        target = normalizeV6Address(target);
+      }
+    }
     URI uri = createURI(target, hasScheme, helpText, useCacheIfPresent);

     String host = uri.getHost();
```
```diff
@@ -271,6 +292,24 @@ public class NetUtils {
     return uri;
   }

+  public static String normalizeV6Address(String target) {
+    if (!target.startsWith("[")) {
+      if (target.contains("%")) {
+        int i = target.lastIndexOf('%');
+        target = target.trim();
+        String port = target.substring(target.lastIndexOf(":") + 1);
+        String addr = target.substring(0, i);
+        target = "[" + addr + "]" + ":" + port;
+      } else {
+        int i = target.lastIndexOf(':');
+        String port = target.substring(target.lastIndexOf(":") + 1);
+        String addr = target.substring(0, i);
+        target = "[" + addr + "]" + ":" + port;
+      }
+    }
+    return target;
+  }
+
   /**
    * Create a socket address with the given host and port. The hostname
    * might be replaced with another host that was set via
```
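Reviewer note: to make the two `normalizeV6Address` branches concrete, here is a compact restatement with invented inputs; it is illustrative, not the shipped method:

```java
public class NormalizeV6Demo {
  // Re-statement of the patch's normalizeV6Address logic for illustration.
  static String normalize(String target) {
    if (!target.startsWith("[")) {
      // The address ends at '%' (scope ID, dropped) or at the last ':' (port).
      int cut = target.contains("%")
          ? target.lastIndexOf('%') : target.lastIndexOf(':');
      String port = target.substring(target.lastIndexOf(':') + 1);
      return "[" + target.substring(0, cut) + "]:" + port;
    }
    return target;
  }

  public static void main(String[] args) {
    System.out.println(normalize("ffff:ffff:ffff:ffff::1:8020"));
    // [ffff:ffff:ffff:ffff::1]:8020
    System.out.println(normalize("fe80::1%2:8020")); // [fe80::1]:8020
    System.out.println(normalize("[::1]:8020"));     // [::1]:8020 (unchanged)
  }
}
```

Note the scope ID (`%2`) is dropped entirely, which matches the bracketed-literal expectations in the new `testCreateSocketAddressWithIPV6` test further down.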
```diff
@@ -669,9 +708,6 @@ public class NetUtils {
     }
   }

-  private static final Pattern ipPortPattern = // Pattern for matching ip[:port]
-      Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d+)?");
-
   /**
    * Attempt to obtain the host name of the given string which contains
    * an IP address and an optional port.
```
```diff
@@ -680,16 +716,26 @@ public class NetUtils {
    * @return Host name or null if the name can not be determined
    */
   public static String getHostNameOfIP(String ipPort) {
-    if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
+    String ip = null;
+    if (null == ipPort || ipPort.isEmpty()) {
       return null;
     }

     try {
-      int colonIdx = ipPort.indexOf(':');
-      String ip = (-1 == colonIdx) ? ipPort
-          : ipPort.substring(0, ipPort.indexOf(':'));
+      HostAndPort hostAndPort = HostAndPort.fromString(ipPort);
+      ip = hostAndPort.getHost();
+      if (!InetAddresses.isInetAddress(ip)) {
+        return null;
+      }
+    } catch (IllegalArgumentException e) {
+      LOG.debug("getHostNameOfIP: '" + ipPort
+          + "' is not a valid IP address or IP/Port pair.", e);
+      return null;
+    }
+
+    try {
       return InetAddress.getByName(ip).getHostName();
     } catch (UnknownHostException e) {
+      LOG.trace("getHostNameOfIP: '"+ipPort+"' name not resolved.", e);
       return null;
     }
   }
```
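Reviewer note: the deleted `ipPortPattern` matched dotted-quad IPv4 only, so IPv6 input previously short-circuited to null before any lookup. A before/after sketch (plain Guava for illustration):

```java
import java.util.regex.Pattern;
import com.google.common.net.HostAndPort;
import com.google.common.net.InetAddresses;

public class HostNameOfIpDemo {
  private static final Pattern OLD_IP_PORT = // removed by the patch
      Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}(:\\d+)?");

  public static void main(String[] args) {
    System.out.println(OLD_IP_PORT.matcher("127.0.0.1:1").matches()); // true
    System.out.println(OLD_IP_PORT.matcher("[::1]:1").matches());     // false

    // New path: parse first, then validate the host part as an IP literal.
    String ip = HostAndPort.fromString("[::1]:1").getHost();          // "::1"
    System.out.println(InetAddresses.isInetAddress(ip));              // true
  }
}
```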
```diff
@@ -702,8 +748,20 @@ public class NetUtils {
    * @return host:port
    */
   public static String normalizeIP2HostName(String ipPort) {
-    if (null == ipPort || !ipPortPattern.matcher(ipPort).matches()) {
-      return ipPort;
+    String ip = null;
+    if (null == ipPort || ipPort.isEmpty()) {
+      return null;
+    }
+    try {
+      HostAndPort hostAndPort = HostAndPort.fromString(ipPort);
+      ip = hostAndPort.getHost();
+      if (!InetAddresses.isInetAddress(ip)) {
+        return null;
+      }
+    } catch (IllegalArgumentException e) {
+      LOG.debug("getHostNameOfIP: '" + ipPort
+          + "' is not a valid IP address or IP/Port pair.", e);
+      return null;
     }

     InetSocketAddress address = createSocketAddr(ipPort);
```
```diff
@@ -735,11 +793,88 @@ public class NetUtils {

   /**
    * Compose a "host:port" string from the address.
+   *
+   * Note that this preferentially returns the host name if available; if the
+   * IP address is desired, use getIPPortString(); if both are desired as in
+   * InetSocketAddress.toString, use getSocketAddressString()
    */
   public static String getHostPortString(InetSocketAddress addr) {
-    return addr.getHostName() + ":" + addr.getPort();
+    String hostName = addr.getHostName();
+    if (InetAddressUtils.isIPv6Address(hostName)) {
+      return "[" + hostName + "]:" + addr.getPort();
+    }
+    return hostName.toLowerCase() + ":" + addr.getPort();
   }
+
+  /**
+   * Compose a "ip:port" string from the InetSocketAddress.
+   *
+   * Note that this may result in an NPE if passed an unresolved
+   * InetSocketAddress.
+   */
+  public static String getIPPortString(InetSocketAddress addr) {
+    final InetAddress ip = addr.getAddress();
+    // this is a judgement call, and we might arguably just guard against NPE
+    // by treating null as "" ; I think this is going to hide more bugs than it
+    // prevents
+    if (ip == null) {
+      throw new IllegalArgumentException(
+          "getIPPortString called with unresolved InetSocketAddress : "
+              + getSocketAddressString(addr));
+    }
+    String ipString = ip.getHostAddress();
+    if (ip instanceof Inet6Address) {
+      return "[" + ipString + "]:" + addr.getPort();
+    }
+    return ipString + ":" + addr.getPort();
+  }
+
+  public static String getIPPortString(String ipAddr, int port) {
+    String s;
+    if (ipAddr != null) {
+      s = ipAddr + ":" + port;
+    } else {
+      s = ":" + port;
+    }
+    //Blank eventually will get to treated as localhost if this gets down to
+    // InetAddress. Tests extensively use a blank address, and we don't want
+    // to change behavior here.
+    if (ipAddr != null && !ipAddr.isEmpty() && InetAddressUtils
+        .isIPv6Address(ipAddr)) {
+      try {
+        InetAddress addr = InetAddress.getByName(ipAddr);
+        String cleanAddr = addr.getHostAddress();
+        if (addr instanceof Inet6Address) {
+          s = '[' + cleanAddr + ']' + ":" + port;
+        }
+      } catch (UnknownHostException e) {
+        // ignore anything that isn't an IPv6 literal and keep the old
+        // behavior. could add debug log here, but this should only happen
+        // if there's a bug in InetAddressUtils.isIPv6Address which accepts
+        // something that isn't an IPv6 literal.
+      }
+    }
+    return s;
+  }
+
+  /**
+   * An IPv6-safe version of InetSocketAddress.toString().
+   * Note that this will typically be of the form hostname/IP:port and is NOT
+   * a substitute for getHostPortString or getIPPortString.
+   */
+  public static String getSocketAddressString(InetSocketAddress addr) {
+    if (addr.isUnresolved()) {
+      return addr.toString();
+    }
+    InetAddress ip = addr.getAddress();
+    if (ip instanceof Inet6Address) {
+      String hostName = addr.getHostName();
+      return ((hostName != null) ? hostName : "")
+          + "/[" + ip.getHostAddress() + "]:" + addr.getPort();
+    } else {
+      return addr.toString();
+    }
+  }
+
   /**
    * Checks if {@code host} is a local host name and return {@link InetAddress}
    * corresponding to that address.
```
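Reviewer note: with three formatters now in play, here are their intended outputs for one resolved IPv6 endpoint, as read from the code above. The snippet only compiles against this branch since it calls the new methods, and the exact host-name strings depend on how the address was constructed and resolved:

```java
import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

public class FormatterDemo {
  public static void main(String[] args) {
    InetSocketAddress addr = new InetSocketAddress("::1", 8020);
    // Host name preferred; an IPv6-literal host name gets bracketed.
    System.out.println(NetUtils.getHostPortString(addr));      // [::1]:8020
    // Always the IP, bracketed for IPv6; throws if addr is unresolved.
    System.out.println(NetUtils.getIPPortString(addr));        // [0:0:0:0:0:0:0:1]:8020
    // IPv6-safe stand-in for InetSocketAddress.toString().
    System.out.println(NetUtils.getSocketAddressString(addr)); // ::1/[0:0:0:0:0:0:0:1]:8020
  }
}
```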
```diff
@@ -1036,6 +1171,38 @@ public class NetUtils {
     return port;
   }

+  /**
+   * Wrapper method on HostAndPort; returns the port from a host:port
+   * or IP:port pair.
+   *
+   * It's probably best to create your own HostAndPort.fromString(hp) and
+   * do a .getPort and .getHostText if you need both host and port in one
+   * scope.
+   */
+  public static int getPortFromHostPort(String hp) {
+    return HostAndPort.fromString(hp).getPort();
+  }
+
+  /**
+   * Wrapper method on HostAndPort; returns the host from a host:port
+   * or IP:port pair.
+   *
+   * It's probably best to create your own HostAndPort.fromString(hp) and
+   * do a .getPort and .getHostText if you need both host and port in one
+   * scope.
+   */
+  public static String getHostFromHostPort(String hp) {
+    return HostAndPort.fromString(hp).getHost();
+  }
+
+  public static InetAddress getInetAddressFromInetSocketAddressString(
+      String remoteAddr) {
+    int slashIdx = remoteAddr.indexOf('/') + 1;
+    int colonIdx = remoteAddr.lastIndexOf(':');
+    String ipOnly = remoteAddr.substring(slashIdx, colonIdx);
+    return InetAddresses.forString(ipOnly);
+  }
+
   /**
    * Return an @{@link InetAddress} to bind to. If bindWildCardAddress is true
    * than returns null.
```
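Reviewer note: `getInetAddressFromInetSocketAddressString` expects the `hostname/IP:port` shape produced by `InetSocketAddress.toString()` and `Peer.getRemoteAddressString()`; slicing between `'/'` and the *last* `':'` is what keeps unbracketed IPv6 literals intact. A sketch of the slicing (sample strings invented):

```java
public class SliceDemo {
  static String ipOnly(String remoteAddr) {
    int slashIdx = remoteAddr.indexOf('/') + 1; // 0 if there is no '/'
    int colonIdx = remoteAddr.lastIndexOf(':'); // last ':' = port separator
    return remoteAddr.substring(slashIdx, colonIdx);
  }

  public static void main(String[] args) {
    System.out.println(ipOnly("dn1.example.com/10.0.0.7:9866")); // 10.0.0.7
    System.out.println(ipOnly("/0:0:0:0:0:0:0:1:9866"));         // 0:0:0:0:0:0:0:1
  }
}
```

The `DataTransferSaslUtil.getPeerAddress` hunk near the end of this compare is the main consumer.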
```diff
@@ -30,6 +30,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;

 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SOCKS_SERVER_KEY;

@@ -148,13 +149,16 @@ public class SocksSocketFactory extends SocketFactory implements
    * @param proxyStr the proxy address using the format "host:port"
    */
   private void setProxy(String proxyStr) {
-    String[] strs = proxyStr.split(":", 2);
-    if (strs.length != 2)
+    try {
+      HostAndPort hp = HostAndPort.fromString(proxyStr);
+      if (!hp.hasPort()) {
+        throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
+      }
+      String host = hp.getHost();
+      this.proxy = new Proxy(Proxy.Type.SOCKS,
+          InetSocketAddress.createUnresolved(host, hp.getPort()));
+    } catch (IllegalArgumentException e) {
       throw new RuntimeException("Bad SOCKS proxy parameter: " + proxyStr);
-    String host = strs[0];
-    int port = Integer.parseInt(strs[1]);
-    this.proxy =
-        new Proxy(Proxy.Type.SOCKS, InetSocketAddress.createUnresolved(host,
-            port));
+    }
   }
 }
```
```diff
@@ -445,7 +445,7 @@ public final class SecurityUtil {
     if (token != null) {
       token.setService(service);
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Acquired token "+token); // Token#toString() prints service
+        LOG.debug("Acquired token " + token); // Token#toString() prints service
       }
     } else {
       LOG.warn("Failed to get token for service "+service);
@@ -459,18 +459,15 @@ public final class SecurityUtil {
    * hadoop.security.token.service.use_ip
    */
   public static Text buildTokenService(InetSocketAddress addr) {
-    String host = null;
     if (useIpForTokenService) {
       if (addr.isUnresolved()) { // host has no ip address
         throw new IllegalArgumentException(
             new UnknownHostException(addr.getHostName())
         );
       }
-      host = addr.getAddress().getHostAddress();
-    } else {
-      host = StringUtils.toLowerCase(addr.getHostName());
+      return new Text(NetUtils.getIPPortString(addr));
     }
-    return new Text(host + ":" + addr.getPort());
+    return new Text(NetUtils.getHostPortString(addr));
   }

   /**
```
```diff
@@ -125,10 +125,10 @@ public class DefaultImpersonationProvider implements ImpersonationProvider {
           + " is not allowed to impersonate " + user.getUserName());
     }

-    MachineList MachineList = proxyHosts.get(
+    MachineList machineList = proxyHosts.get(
         getProxySuperuserIpConfKey(realUser.getShortUserName()));

-    if(MachineList == null || !MachineList.includes(remoteAddress)) {
+    if(machineList == null || !machineList.includes(remoteAddress)) {
       throw new AuthorizationException("Unauthorized connection for super-user: "
           + realUser.getUserName() + " from IP " + remoteAddress);
     }
```
```diff
@@ -31,6 +31,7 @@ import java.util.concurrent.CountDownLatch;
 import java.util.concurrent.TimeUnit;
 import java.util.concurrent.TimeoutException;

+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.ServerSocketUtil;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.Time;
@@ -330,56 +331,39 @@ public abstract class ClientBaseWithFixes extends ZKTestCase {
     return tmpDir;
   }

-  private static int getPort(String hostPort) {
-    String[] split = hostPort.split(":");
-    String portstr = split[split.length-1];
-    String[] pc = portstr.split("/");
-    if (pc.length > 1) {
-      portstr = pc[0];
-    }
-    return Integer.parseInt(portstr);
-  }
-
   static ServerCnxnFactory createNewServerInstance(File dataDir,
       ServerCnxnFactory factory, String hostPort, int maxCnxns)
-      throws IOException, InterruptedException
-  {
+      throws IOException, InterruptedException {
     ZooKeeperServer zks = new ZooKeeperServer(dataDir, dataDir, 3000);
-    final int PORT = getPort(hostPort);
+    final int port = NetUtils.getPortFromHostPort(hostPort);
     if (factory == null) {
-      factory = ServerCnxnFactory.createFactory(PORT, maxCnxns);
+      factory = ServerCnxnFactory.createFactory(port, maxCnxns);
     }
     factory.startup(zks);
-    Assert.assertTrue("waiting for server up",
-        ClientBaseWithFixes.waitForServerUp("127.0.0.1:" + PORT,
-            CONNECTION_TIMEOUT));
+    Assert.assertTrue("waiting for server up", ClientBaseWithFixes
+        .waitForServerUp("127.0.0.1:" + port, CONNECTION_TIMEOUT));

     return factory;
   }

   static void shutdownServerInstance(ServerCnxnFactory factory,
-      String hostPort)
-  {
+      String hostPort) {
     if (factory != null) {
       ZKDatabase zkDb;
-      {
-        ZooKeeperServer zs = getServer(factory);
-        zkDb = zs.getZKDatabase();
-      }
+      ZooKeeperServer zs = getServer(factory);
+      zkDb = zs.getZKDatabase();
       factory.shutdown();
       try {
         zkDb.close();
       } catch (IOException ie) {
         LOG.warn("Error closing logs ", ie);
       }
-      final int PORT = getPort(hostPort);
+      final int port = NetUtils.getPortFromHostPort(hostPort);

-      Assert.assertTrue("waiting for server down",
-          ClientBaseWithFixes.waitForServerDown("127.0.0.1:" + PORT,
-              CONNECTION_TIMEOUT));
+      Assert.assertTrue("waiting for server down", ClientBaseWithFixes
+          .waitForServerDown("127.0.0.1:" + port, CONNECTION_TIMEOUT));
     }
   }

   /**
    * Test specific setup
```
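Reviewer note: one behavioral nuance in `ClientBaseWithFixes`: the removed hand-rolled `getPort` tolerated a `/chroot`-style suffix after the port (it split on `'/'`), while `HostAndPort.fromString` rejects it, so `hostPort` must now be a clean `host:port`. Illustration with plain Guava:

```java
import com.google.common.net.HostAndPort;

public class ZkHostPortDemo {
  public static void main(String[] args) {
    // Clean host:port, as the fixture passes today.
    System.out.println(HostAndPort.fromString("127.0.0.1:11221").getPort()); // 11221

    // A chroot-style suffix, which the removed getPort() stripped, now fails.
    try {
      HostAndPort.fromString("127.0.0.1:11221/foo").getPort();
    } catch (IllegalArgumentException e) {
      System.out.println("rejected: " + e.getMessage());
    }
  }
}
```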
```diff
@@ -24,6 +24,7 @@ import java.net.NetworkInterface;
 import java.net.SocketException;
 import java.net.UnknownHostException;
 import java.net.InetAddress;
+import java.net.Inet6Address;

 import javax.naming.CommunicationException;
 import javax.naming.NameNotFoundException;
@@ -251,4 +252,20 @@ public class TestDNS {
     assertNotNull("localhost is null", localhost);
     LOG.info("Localhost IPAddr is " + localhost.toString());
   }
+
+  /**
+   * Test that dns query address is calculated correctly for ipv6 addresses.
+   */
+  @Test
+  public void testIPv6ReverseDNSAddress() throws Exception {
+    Inet6Address adr = (Inet6Address) InetAddress.getByName("::");
+    assertEquals(
+        "0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.ip6.arpa",
+        DNS.getIPv6DnsAddr(adr, null));
+
+    adr = (Inet6Address) InetAddress.getByName("fe80::62eb:69ff:fe9b:bade");
+    assertEquals(
+        "e.d.a.b.b.9.e.f.f.f.9.6.b.e.2.6.0.0.0.0.0.0.0.0.0.0.0.0.0.8.e.f.ip6.arpa",
+        DNS.getIPv6DnsAddr(adr, null));
+  }
 }
```
```diff
@@ -57,6 +57,14 @@ public class TestNetUtils {
   private static final String DEST_PORT_NAME = Integer.toString(DEST_PORT);
   private static final int LOCAL_PORT = 8080;
   private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT);
+  private static final String IPV6_LOOPBACK_LONG_STRING = "0:0:0:0:0:0:0:1";
+  private static final String IPV6_SAMPLE_ADDRESS =
+      "2a03:2880:2130:cf05:face:b00c:0:1";
+  private static final String IPV6_LOOPBACK_SHORT_STRING = "::1";
+  private static final String IPV6_LOOPBACK_WITH_PORT =
+      "[" + IPV6_LOOPBACK_LONG_STRING + "]:10";
+  private static final String IPV6_SAMPLE_WITH_PORT =
+      "[" + IPV6_SAMPLE_ADDRESS + "]:10";

   /**
    * Some slop around expected times when making sure timeouts behave
@@ -583,13 +591,19 @@ public class TestNetUtils {
     return addr;
   }

-  private void
-  verifyInetAddress(InetAddress addr, String host, String ip) {
+  private void verifyInetAddress(InetAddress addr, String host, String... ips) {
     assertNotNull(addr);
     assertEquals(host, addr.getHostName());
-    assertEquals(ip, addr.getHostAddress());
+
+    boolean found = false;
+    for (String ip : ips) {
+      found |= ip.equals(addr.getHostAddress());
+    }
+    assertTrue("Expected addr.getHostAddress[" + addr.getHostAddress()
+        + "] to be one of " + StringUtils.join(ips, ","), found);
   }


   @Test
   public void testResolverUnqualified() {
     String host = "host";
@@ -619,12 +633,13 @@ public class TestNetUtils {
   }

   // localhost

   @Test
   public void testResolverLoopback() {
     String host = "Localhost";
     InetAddress addr = verifyResolve(host); // no lookup should occur
-    verifyInetAddress(addr, "Localhost", "127.0.0.1");
+    verifyInetAddress(addr, "Localhost", "127.0.0.1", IPV6_LOOPBACK_LONG_STRING,
+        IPV6_LOOPBACK_SHORT_STRING);
   }

   @Test
@@ -721,19 +736,22 @@ public class TestNetUtils {
     } catch (UnknownHostException e) {
       Assume.assumeTrue("Network not resolving "+ oneHost, false);
     }
-    List<String> hosts = Arrays.asList("127.0.0.1",
-        "localhost", oneHost, "UnknownHost123");
+    List<String> hosts = Arrays.asList(new String[] {"127.0.0.1",
+        "localhost", oneHost, "UnknownHost123.invalid"});
     List<String> normalizedHosts = NetUtils.normalizeHostNames(hosts);
     String summary = "original [" + StringUtils.join(hosts, ", ") + "]"
         + " normalized [" + StringUtils.join(normalizedHosts, ", ") + "]";
     // when ipaddress is normalized, same address is expected in return
     assertEquals(summary, hosts.get(0), normalizedHosts.get(0));
     // for normalizing a resolvable hostname, resolved ipaddress is expected in return

     assertFalse("Element 1 equal "+ summary,
         normalizedHosts.get(1).equals(hosts.get(1)));
-    assertEquals(summary, hosts.get(0), normalizedHosts.get(1));
-    // this address HADOOP-8372: when normalizing a valid resolvable hostname start with numeric,
-    // its ipaddress is expected to return
+    assertTrue("Should get the localhost address back",
+        normalizedHosts.get(1).equals(hosts.get(0)) || normalizedHosts.get(1)
+            .equals(IPV6_LOOPBACK_LONG_STRING));
+    // this address HADOOP-8372: when normalizing a valid resolvable hostname
+    // start with numeric, its ipaddress is expected to return
     assertFalse("Element 2 equal " + summary,
         normalizedHosts.get(2).equals(hosts.get(2)));
     // return the same hostname after normalizing a irresolvable hostname.
@@ -745,11 +763,22 @@ public class TestNetUtils {
     assertNull(NetUtils.getHostNameOfIP(null));
     assertNull(NetUtils.getHostNameOfIP(""));
     assertNull(NetUtils.getHostNameOfIP("crazytown"));
-    assertNull(NetUtils.getHostNameOfIP("127.0.0.1:")); // no port
     assertNull(NetUtils.getHostNameOfIP("127.0.0.1:-1")); // bogus port
     assertNull(NetUtils.getHostNameOfIP("127.0.0.1:A")); // bogus port
+    assertNotNull(NetUtils.getHostNameOfIP("[::1]"));
+    assertNotNull(NetUtils.getHostNameOfIP("[::1]:1"));
     assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1"));
     assertNotNull(NetUtils.getHostNameOfIP("127.0.0.1:1"));
+    assertEquals("localhost", NetUtils.getHostNameOfIP("127.0.0.1:"));
+  }
+
+  @Test
+  public void testGetHostNameOfIPworksWithIPv6() {
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_LOOPBACK_LONG_STRING));
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_LOOPBACK_SHORT_STRING));
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_SAMPLE_ADDRESS));
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_SAMPLE_WITH_PORT));
+    assertNotNull(NetUtils.getHostNameOfIP(IPV6_LOOPBACK_WITH_PORT));
   }

   @Test
@@ -763,6 +792,18 @@ public class TestNetUtils {
     assertEquals(defaultAddr.trim(), NetUtils.getHostPortString(addr));
   }

+  @Test
+  public void testTrimCreateSocketAddressIPv6() {
+    Configuration conf = new Configuration();
+    NetUtils.addStaticResolution("hostIPv6", IPV6_LOOPBACK_LONG_STRING);
+    final String defaultAddr = "hostIPv6:1 ";
+
+    InetSocketAddress addr = NetUtils.createSocketAddr(defaultAddr);
+    conf.setSocketAddr("myAddress", addr);
+    assertTrue("Trim should have been called on ipv6 hostname",
+        defaultAddr.trim().equalsIgnoreCase(NetUtils.getHostPortString(addr)));
+  }
+
   @Test
   public void testBindToLocalAddress() throws Exception {
     assertNotNull(NetUtils
@@ -776,4 +817,41 @@ public class TestNetUtils {
     String gotStr = StringUtils.join(got, ", ");
     assertEquals(expectStr, gotStr);
   }
+
+  @Test
+  public void testCreateSocketAddressWithIPV6() throws Throwable {
+    String ipv6Address = "2a03:2880:2130:cf05:face:b00c:0:1";
+    String ipv6WithPort = ipv6Address + ":12345";
+
+    InetSocketAddress addr = NetUtils.createSocketAddr(ipv6WithPort,
+        1000, "myconfig");
+    assertEquals("[" + ipv6Address + "]", addr.getHostName());
+    assertEquals(12345, addr.getPort());
+
+    String ipv6SampleAddressWithScope = ipv6Address + "%2";
+    ipv6WithPort = ipv6SampleAddressWithScope + ":12345";
+    addr = NetUtils.createSocketAddr(ipv6WithPort, 1000, "myconfig");
+    assertEquals("[" + ipv6Address + "]", addr.getHostName());
+    assertEquals(12345, addr.getPort());
+
+    ipv6Address = "[2a03:2880:2130:cf05:face:b00c:0:1]";
+    ipv6WithPort = ipv6Address + ":12345";
+
+    addr = NetUtils.createSocketAddr(ipv6WithPort, 1000, "myconfig");
+    assertEquals(ipv6Address, addr.getHostName());
+    assertEquals(12345, addr.getPort());
+
+    String ipv6AddressWithScheme =
+        "https://2a03:2880:2130:cf05:face:b00c:0:1:12345";
+    addr = NetUtils.createSocketAddr(ipv6AddressWithScheme, 1000,
+        "myconfig");
+    assertEquals(ipv6Address, addr.getHostName());
+    assertEquals(12345, addr.getPort());
+
+    ipv6AddressWithScheme = "https://[2a03:2880:2130:cf05:face:b00c:0:1]:12345";
+    addr = NetUtils.createSocketAddr(ipv6AddressWithScheme, 1000,
+        "myconfig");
+    assertEquals(ipv6Address, addr.getHostName());
+    assertEquals(12345, addr.getPort());
+  }
 }
```
```diff
@@ -25,7 +25,6 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.ipc.TestRpcBase;
-import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.authorize.DefaultImpersonationProvider;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.token.Token;
@@ -361,7 +360,6 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
   public void testProxyWithToken() throws Exception {
     final Configuration conf = new Configuration(masterConf);
     TestTokenSecretManager sm = new TestTokenSecretManager();
-    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, conf);
     RPC.setProtocolEngine(conf, TestRpcService.class,
         ProtobufRpcEngine2.class);
     UserGroupInformation.setConfiguration(conf);
@@ -408,7 +406,6 @@ public class TestDoAsEffectiveUser extends TestRpcBase {
   public void testTokenBySuperUser() throws Exception {
     TestTokenSecretManager sm = new TestTokenSecretManager();
     final Configuration newConf = new Configuration(masterConf);
-    SecurityUtil.setAuthenticationMethod(AuthenticationMethod.KERBEROS, newConf);
     // Set RPC engine to protobuf RPC engine
     RPC.setProtocolEngine(newConf, TestRpcService.class,
         ProtobufRpcEngine2.class);
```
```diff
@@ -182,16 +182,12 @@ public class TestSecurityUtil {
     conf.setBoolean(
         CommonConfigurationKeys.HADOOP_SECURITY_TOKEN_SERVICE_USE_IP, true);
     SecurityUtil.setConfiguration(conf);
-    assertEquals("127.0.0.1:123",
-        SecurityUtil.buildTokenService(new InetSocketAddress("LocalHost", 123)).toString()
-    );
-    assertEquals("127.0.0.1:123",
-        SecurityUtil.buildTokenService(new InetSocketAddress("127.0.0.1", 123)).toString()
-    );
-    // what goes in, comes out
-    assertEquals("127.0.0.1:123",
-        SecurityUtil.buildTokenService(NetUtils.createSocketAddr("127.0.0.1", 123)).toString()
-    );
+    assertOneOf(SecurityUtil
+        .buildTokenService(NetUtils.createSocketAddrForHost("LocalHost", 123))
+        .toString(), "127.0.0.1:123", "[0:0:0:0:0:0:0:1]:123");
+    assertOneOf(SecurityUtil
+        .buildTokenService(NetUtils.createSocketAddrForHost("127.0.0.1", 123))
+        .toString(), "127.0.0.1:123", "[0:0:0:0:0:0:0:1]:123");
   }

   @Test
@@ -496,4 +492,13 @@ public class TestSecurityUtil {
         ZK_AUTH_VALUE.toCharArray());
     provider.flush();
   }
+
+  private void assertOneOf(String value, String... expected) {
+    boolean found = false;
+    for (String ip : expected) {
+      found |= ip.equals(value);
+    }
+    assertTrue("Expected value [" + value + "] to be one of " + StringUtils
+        .join(",", expected), found);
+  }
 }
```
```diff
@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.Slot;
 import org.apache.hadoop.hdfs.shortcircuit.ShortCircuitShm.SlotId;
 import org.apache.hadoop.hdfs.util.IOUtilsClient;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -876,6 +877,6 @@ public class BlockReaderFactory implements ShortCircuitReplicaCreator {
    */
   public static String getFileName(final InetSocketAddress s,
       final String poolId, final long blockId) {
-    return s.toString() + ":" + poolId + ":" + blockId;
+    return NetUtils.getSocketAddressString(s) + ":" + poolId + ":" + blockId;
   }
 }
```
```diff
@@ -23,7 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;

 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.net.NetUtils;
 import java.net.InetSocketAddress;

 /**
@@ -125,8 +125,9 @@ public class DatanodeID implements Comparable<DatanodeID> {
   }

   public void setIpAddr(String ipAddr) {
+    this.ipAddr = ipAddr;
     //updated during registration, preserve former xferPort
-    setIpAndXferPort(ipAddr, getByteString(ipAddr), xferPort);
+    setIpAndXferPort(this.ipAddr, getByteString(ipAddr), xferPort);
   }

   private void setIpAndXferPort(String ipAddr, ByteString ipAddrBytes,
@@ -135,7 +136,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
     this.ipAddr = ipAddr;
     this.ipAddrBytes = ipAddrBytes;
     this.xferPort = xferPort;
-    this.xferAddr = ipAddr + ":" + xferPort;
+    this.xferAddr = NetUtils.getIPPortString(ipAddr, xferPort);
   }

   public void setPeerHostName(String peerHostName) {
@@ -201,21 +202,21 @@ public class DatanodeID implements Comparable<DatanodeID> {
    * @return IP:ipcPort string
    */
   private String getIpcAddr() {
-    return ipAddr + ":" + ipcPort;
+    return NetUtils.getIPPortString(ipAddr, ipcPort);
   }

   /**
    * @return IP:infoPort string
    */
   public String getInfoAddr() {
-    return ipAddr + ":" + infoPort;
+    return NetUtils.getIPPortString(ipAddr, infoPort);
   }

   /**
    * @return IP:infoPort string
    */
   public String getInfoSecureAddr() {
-    return ipAddr + ":" + infoSecurePort;
+    return NetUtils.getIPPortString(ipAddr, infoSecurePort);
   }

   /**
@@ -299,6 +300,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
    * Note that this does not update storageID.
    */
   public void updateRegInfo(DatanodeID nodeReg) {
+    ipAddr = nodeReg.getIpAddr();
     setIpAndXferPort(nodeReg.getIpAddr(), nodeReg.getIpAddrBytes(),
         nodeReg.getXferPort());
     hostName = nodeReg.getHostName();
```
DataTransferSaslUtil.java

@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DataTransferEncr
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.HandshakeSecretProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.CipherOptionProto;
 import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SaslPropertiesResolver;
 import org.apache.hadoop.security.SaslRpcServer.QualityOfProtection;
 import org.slf4j.Logger;

@@ -60,7 +61,6 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
 import org.apache.hadoop.thirdparty.com.google.common.collect.ImmutableSet;
 import org.apache.hadoop.thirdparty.com.google.common.collect.Maps;
-import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 import org.apache.hadoop.thirdparty.protobuf.ByteString;
 
 /**

@@ -157,11 +157,8 @@ public final class DataTransferSaslUtil {
    * @return InetAddress from peer
    */
   public static InetAddress getPeerAddress(Peer peer) {
-    String remoteAddr = peer.getRemoteAddressString().split(":")[0];
-    int slashIdx = remoteAddr.indexOf('/');
-    return InetAddresses.forString(slashIdx != -1 ?
-        remoteAddr.substring(slashIdx + 1, remoteAddr.length()) :
-        remoteAddr);
+    String remoteAddr = peer.getRemoteAddressString();
+    return NetUtils.getInetAddressFromInetSocketAddressString(remoteAddr);
   }
 
   /**
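The deleted lines split on the *first* colon, which truncates an IPv6 peer such as `/[2001:db8::9]:50010`. `NetUtils.getInetAddressFromInetSocketAddressString` is introduced elsewhere on this branch; a rough sketch of what a parser for `Peer#getRemoteAddressString` output has to handle, assuming the formats the old code dealt with:

```java
import java.net.InetAddress;
import java.net.UnknownHostException;

class PeerAddressSketch {
  // Illustrative only: the helper name is from the diff, this body is not.
  static InetAddress parsePeer(String remoteAddr) throws UnknownHostException {
    int slashIdx = remoteAddr.indexOf('/');
    String ipPort = slashIdx == -1 ? remoteAddr : remoteAddr.substring(slashIdx + 1);
    String ip;
    if (ipPort.startsWith("[")) {                    // "[v6literal]:port"
      ip = ipPort.substring(1, ipPort.indexOf(']'));
    } else {                                         // "v4OrHost:port"
      int portSep = ipPort.lastIndexOf(':');
      ip = portSep == -1 ? ipPort : ipPort.substring(0, portSep);
    }
    return InetAddress.getByName(ip);                // literal input: no DNS lookup
  }

  public static void main(String[] args) throws UnknownHostException {
    System.out.println(parsePeer("/[2001:db8::9]:50010"));    // /2001:db8:0:0:0:0:0:9
    System.out.println(parsePeer("host1/192.168.0.9:50010")); // /192.168.0.9
  }
}
```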
JsonUtilClient.java

@@ -305,7 +305,7 @@ public class JsonUtilClient {
     if (ipAddr == null) {
       String name = getString(m, "name", null);
       if (name != null) {
-        int colonIdx = name.indexOf(':');
+        int colonIdx = name.lastIndexOf(':');
         if (colonIdx > 0) {
           ipAddr = name.substring(0, colonIdx);
           xferPort = Integer.parseInt(name.substring(colonIdx +1));
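The same `indexOf` → `lastIndexOf` switch recurs in DataXceiver and Checkpointer below: in a `host:port` string, only the final colon can be the port separator once the host may be an IPv6 literal. A worked example with a hypothetical address:

```java
class LastColonSplit {
  public static void main(String[] args) {
    String name = "2001:db8::9:50010";           // hypothetical IPv6 xfer address
    int first = name.indexOf(':');               // 4  -> host "2001" (wrong)
    int colonIdx = name.lastIndexOf(':');        // 11 -> correct split point
    String ipAddr = name.substring(0, colonIdx);                   // "2001:db8::9"
    int xferPort = Integer.parseInt(name.substring(colonIdx + 1)); // 50010
    System.out.println(ipAddr + " " + xferPort + " (first colon at " + first + ")");
  }
}
```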
IPCLoggerChannel.java

@@ -54,12 +54,12 @@ import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
 import org.apache.hadoop.ipc.ProtobufRpcEngine2;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.util.StopWatch;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Preconditions;
-import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.FutureCallback;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.Futures;
 import org.apache.hadoop.thirdparty.com.google.common.util.concurrent.ListenableFuture;

@@ -709,8 +709,7 @@ public class IPCLoggerChannel implements AsyncLogger {
 
   @Override
   public String toString() {
-    return InetAddresses.toAddrString(addr.getAddress()) + ':' +
-        addr.getPort();
+    return NetUtils.getHostPortString(addr);
   }
 
   @Override
DatanodeManager.java

@@ -56,6 +56,7 @@ import org.apache.hadoop.net.NetworkTopology.InvalidTopologyException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.http.conn.util.InetAddressUtils;
 import org.apache.hadoop.util.Sets;
 import org.apache.hadoop.util.Timer;
 import org.slf4j.Logger;

@@ -1522,7 +1523,13 @@ public class DatanodeManager {
     DatanodeID dnId;
     String hostStr;
     int port;
-    int idx = hostLine.indexOf(':');
+    int idx;
+
+    if (InetAddressUtils.isIPv6StdAddress(hostLine)) {
+      idx = -1;
+    } else {
+      idx = hostLine.lastIndexOf(':');
+    }
 
     if (-1 == idx) {
       hostStr = hostLine;
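`InetAddressUtils` is Apache HttpCore's address classifier (matching the import added above). Note that `isIPv6StdAddress` recognizes only the uncompressed textual form, so a compressed literal like `2001:db8::15` still falls through to the `lastIndexOf` branch:

```java
import org.apache.http.conn.util.InetAddressUtils;

class HostLineCheck {
  public static void main(String[] args) {
    // Uncompressed form: recognized as a bare address, so idx stays -1.
    System.out.println(InetAddressUtils.isIPv6StdAddress("0:0:0:0:0:0:0:1")); // true
    // Compressed form is NOT the "std" form.
    System.out.println(InetAddressUtils.isIPv6StdAddress("2001:db8::15"));    // false
    // Plain host:port entries also return false and get split on the last ':'.
    System.out.println(InetAddressUtils.isIPv6StdAddress("dn1:50010"));       // false
  }
}
```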
HostFileManager.java

@@ -23,12 +23,11 @@ import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.util.HostsFileReader;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.URI;
-import java.net.URISyntaxException;
 import java.util.HashSet;
 
 /**

@@ -89,16 +88,14 @@ public class HostFileManager extends HostConfigManager {
   @VisibleForTesting
   static InetSocketAddress parseEntry(String type, String fn, String line) {
     try {
-      URI uri = new URI("dummy", line, null, null, null);
-      int port = uri.getPort() == -1 ? 0 : uri.getPort();
-      InetSocketAddress addr = new InetSocketAddress(uri.getHost(), port);
+      InetSocketAddress addr = NetUtils.createSocketAddr(line, 0);
       if (addr.isUnresolved()) {
         LOG.warn(String.format("Failed to resolve address `%s` in `%s`. " +
             "Ignoring in the %s list.", line, fn, type));
         return null;
       }
       return addr;
-    } catch (URISyntaxException e) {
+    } catch (IllegalArgumentException e) {
       LOG.warn(String.format("Failed to parse `%s` in `%s`. " + "Ignoring in " +
           "the %s list.", line, fn, type));
     }
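`NetUtils.createSocketAddr(target, defaultPort)` is Hadoop's long-standing entry parser, and it reports malformed input as `IllegalArgumentException`, which is what the rewritten catch clause handles. A usage sketch (whether bracketed IPv6 entries parse here depends on the NetUtils changes elsewhere on this branch):

```java
import java.net.InetSocketAddress;
import org.apache.hadoop.net.NetUtils;

class ParseEntrySketch {
  public static void main(String[] args) {
    // "host:port" and bare "host" forms, as they appear in dfs.hosts files.
    InetSocketAddress a = NetUtils.createSocketAddr("dn1.example.com:50010", 0);
    InetSocketAddress b = NetUtils.createSocketAddr("dn2.example.com", 0); // port defaults to 0
    System.out.println(a + " " + b);
    // Malformed entries surface as IllegalArgumentException rather than the
    // old URISyntaxException, hence the catch-clause change above.
  }
}
```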
BlockPoolSliceStorage.java

@@ -80,11 +80,21 @@ public class BlockPoolSliceStorage extends Storage {
    * progress. Do not delete the 'previous' directory.
    */
   static final String ROLLING_UPGRADE_MARKER_FILE = "RollingUpgradeInProgress";
+  private static final String BLOCK_POOL_ID_IPV4_PATTERN_BASE =
+      "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}";
+
+  // Because we don't support ":" in path BlockPoolID on IPv6 boxes we replace
+  // ":" with ".".
+  // Also format of IPv6 is less fixed so we surround it with square brackets
+  // and just check that match
+  private static final String BLOCK_POOL_ID_IPV6_PATTERN_BASE =
+      Pattern.quote("[") + "(?:.*)" + Pattern.quote("]");
+
   private static final String BLOCK_POOL_ID_PATTERN_BASE =
-      Pattern.quote(File.separator) +
-      "BP-\\d+-\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}-\\d+" +
-      Pattern.quote(File.separator);
+      Pattern.quote(File.separator) + "BP-\\d+-(?:"
+      + BLOCK_POOL_ID_IPV4_PATTERN_BASE + "|"
+      + BLOCK_POOL_ID_IPV6_PATTERN_BASE + ")-\\d+" + Pattern
+      .quote(File.separator);
 
   private static final Pattern BLOCK_POOL_PATH_PATTERN = Pattern.compile(
       "^(.*)(" + BLOCK_POOL_ID_PATTERN_BASE + ")(.*)$");
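A worked check of the combined pattern, with the `File.separator` quoting dropped for brevity and made-up pool IDs:

```java
import java.util.regex.Pattern;

class BlockPoolIdPatternCheck {
  public static void main(String[] args) {
    // The two alternatives above, minus the surrounding File.separator quoting.
    String ipv4 = "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}";
    String ipv6 = Pattern.quote("[") + "(?:.*)" + Pattern.quote("]");
    Pattern id = Pattern.compile("BP-\\d+-(?:" + ipv4 + "|" + ipv6 + ")-\\d+");

    System.out.println(id.matcher("BP-412-192.168.1.9-1439075633").matches());   // true
    System.out.println(id.matcher("BP-412-[2001.db8..9]-1439075633").matches()); // true: ':' already '.'
    System.out.println(id.matcher("BP-412-2001:db8::9-1439075633").matches());   // false: raw ':'
  }
}
```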
DataXceiver.java

@@ -147,7 +147,7 @@ class DataXceiver extends Receiver implements Runnable {
     this.ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(datanode.getConf());
     this.smallBufferSize = DFSUtilClient.getSmallBufferSize(datanode.getConf());
     remoteAddress = peer.getRemoteAddressString();
-    final int colonIdx = remoteAddress.indexOf(':');
+    final int colonIdx = remoteAddress.lastIndexOf(':');
     remoteAddressWithoutPort =
         (colonIdx < 0) ? remoteAddress : remoteAddress.substring(0, colonIdx);
     localAddress = peer.getLocalAddressString();
Checkpointer.java

@@ -103,9 +103,9 @@ class Checkpointer extends Daemon {
     checkpointConf = new CheckpointConf(conf);
 
     // Pull out exact http address for posting url to avoid ip aliasing issues
     String fullInfoAddr = conf.get(DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
         DFS_NAMENODE_BACKUP_HTTP_ADDRESS_DEFAULT);
-    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.indexOf(":"));
+    infoBindAddress = fullInfoAddr.substring(0, fullInfoAddr.lastIndexOf(":"));
 
     LOG.info("Checkpoint Period : " +
         checkpointConf.getPeriod() + " secs " +
NNStorage.java

@@ -56,6 +56,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.util.Lists;
 import org.apache.hadoop.util.Time;
+import org.apache.http.conn.util.InetAddressUtils;
 import org.eclipse.jetty.util.ajax.JSON;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;

@@ -1024,6 +1025,10 @@ public class NNStorage extends Storage implements Closeable,
     String ip;
     try {
       ip = DNS.getDefaultIP("default");
+      if (InetAddressUtils.isIPv6StdAddress(ip)) {
+        // HDFS doesn't support ":" in path, replace it with "."
+        ip = "[" + ip.replaceAll(":", ".") + "]";
+      }
     } catch (UnknownHostException e) {
       LOG.warn("Could not find ip address of \"default\" inteface.");
       throw e;
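Block pool IDs embed the NameNode's IP and end up in directory names, so the colon, illegal in HDFS paths, has to go. A quick check of the substitution above with a made-up address:

```java
class PathSafePoolId {
  public static void main(String[] args) {
    // Made-up uncompressed IPv6 address; in NNStorage the real value comes
    // from DNS.getDefaultIP("default").
    String ip = "2001:db8:0:0:0:0:0:1";
    String pathSafe = "[" + ip.replaceAll(":", ".") + "]";
    System.out.println(pathSafe);               // [2001.db8.0.0.0.0.0.1]
    // The resulting block pool ID, e.g. "BP-<rand>-[2001.db8.0.0.0.0.0.1]-<time>",
    // is what the bracketed BLOCK_POOL_ID_IPV6_PATTERN_BASE above matches.
  }
}
```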
NamenodeWebHdfsMethods.java

@@ -117,6 +117,7 @@ import org.apache.hadoop.util.StringUtils;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.thirdparty.com.google.common.base.Charsets;
+import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;
 import com.sun.jersey.spi.container.ResourceFilters;
 
 /** Web-hdfs NameNode implementation. */

@@ -273,22 +274,22 @@ public class NamenodeWebHdfsMethods {
 
     HashSet<Node> excludes = new HashSet<Node>();
     if (excludeDatanodes != null) {
-      for (String host : StringUtils
+      for (String hostAndPort : StringUtils
           .getTrimmedStringCollection(excludeDatanodes)) {
-        int idx = host.indexOf(":");
+        HostAndPort hp = HostAndPort.fromString(hostAndPort);
         Node excludeNode = null;
-        if (idx != -1) {
-          excludeNode = bm.getDatanodeManager().getDatanodeByXferAddr(
-              host.substring(0, idx), Integer.parseInt(host.substring(idx + 1)));
+        if (hp.hasPort()) {
+          excludeNode = bm.getDatanodeManager()
+              .getDatanodeByXferAddr(hp.getHost(), hp.getPort());
         } else {
-          excludeNode = bm.getDatanodeManager().getDatanodeByHost(host);
+          excludeNode = bm.getDatanodeManager().getDatanodeByHost(hostAndPort);
         }
 
         if (excludeNode != null) {
           excludes.add(excludeNode);
         } else {
           LOG.debug("DataNode {} was requested to be excluded, "
-              + "but it was not found.", host);
+              + "but it was not found.", hostAndPort);
         }
       }
     }
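`HostAndPort` is Guava's host/port splitter (here via the `hadoop-thirdparty` relocation), and unlike the deleted `indexOf(":")` logic it understands bracketed IPv6 input:

```java
import org.apache.hadoop.thirdparty.com.google.common.net.HostAndPort;

class ExcludeEntryCheck {
  public static void main(String[] args) {
    // The three spellings a client may pass in the exclude-datanodes parameter:
    HostAndPort a = HostAndPort.fromString("dn1.example.com:50010");
    System.out.println(a.hasPort() + " " + a.getHost() + " " + a.getPort());
    // -> true dn1.example.com 50010

    HostAndPort b = HostAndPort.fromString("dn1.example.com");
    System.out.println(b.hasPort());            // false -> getDatanodeByHost() path

    HostAndPort c = HostAndPort.fromString("[2001:db8::9]:50010");
    System.out.println(c.getHost());            // 2001:db8::9 (brackets stripped)
  }
}
```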
GetConf.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.DFSUtil.ConfiguredNNAddress;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.Tool;

@@ -245,7 +246,7 @@ public class GetConf extends Configured implements Tool {
     if (!cnnlist.isEmpty()) {
       for (ConfiguredNNAddress cnn : cnnlist) {
         InetSocketAddress rpc = cnn.getAddress();
-        tool.printOut(rpc.getHostName()+":"+rpc.getPort());
+        tool.printOut(NetUtils.getHostPortString(rpc));
       }
       return 0;
     }
WebImageViewer.java

@@ -34,6 +34,7 @@ import io.netty.handler.codec.string.StringEncoder;
 import io.netty.util.concurrent.GlobalEventExecutor;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.security.UserGroupInformation;

@@ -122,7 +123,9 @@ public class WebImageViewer implements Closeable {
     allChannels.add(channel);
 
     address = (InetSocketAddress) channel.localAddress();
-    LOG.info("WebImageViewer started. Listening on " + address.toString() + ". Press Ctrl+C to stop the viewer.");
+    LOG.info("WebImageViewer started. Listening on " + NetUtils
+        .getSocketAddressString(address) +
+        ". Press Ctrl+C to stop the viewer.");
   }
 
   /**
dfshealth.js (NameNode web UI)

@@ -227,8 +227,10 @@
       var n = nodes[i];
       n.usedPercentage = Math.round((n.used + n.nonDfsUsedSpace) * 1.0 / n.capacity * 100);
 
-      var port = n.infoAddr.split(":")[1];
-      var securePort = n.infoSecureAddr.split(":")[1];
+      var array = n.infoAddr.split(":");
+      var port = array[array.length-1];
+      array = n.infoSecureAddr.split(":");
+      var securePort = array[array.length-1];
       var dnHost = n.name.split(":")[0];
       n.dnWebAddress = "http://" + dnHost + ":" + port;
       if (securePort != 0) {
TestDFSAddressConfig.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Test;
 
 

@@ -55,7 +56,7 @@ public class TestDFSAddressConfig {
     ArrayList<DataNode> dns = cluster.getDataNodes();
     DataNode dn = dns.get(0);
 
-    String selfSocketAddr = dn.getXferAddress().toString();
+    String selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));
 

@@ -80,7 +81,7 @@ public class TestDFSAddressConfig {
     dns = cluster.getDataNodes();
     dn = dns.get(0);
 
-    selfSocketAddr = dn.getXferAddress().toString();
+    selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 127.0.0.1
     assertTrue(selfSocketAddr.contains("/127.0.0.1:"));

@@ -105,10 +106,11 @@ public class TestDFSAddressConfig {
     dns = cluster.getDataNodes();
     dn = dns.get(0);
 
-    selfSocketAddr = dn.getXferAddress().toString();
+    selfSocketAddr = NetUtils.getSocketAddressString(dn.getXferAddress());
     System.out.println("DN Self Socket Addr == " + selfSocketAddr);
     // assert that default self socket address is 0.0.0.0
-    assertTrue(selfSocketAddr.contains("/0.0.0.0:"));
+    assertTrue(selfSocketAddr.contains("/0.0.0.0:") ||
+        selfSocketAddr.contains("/[0:0:0:0:0:0:0:0]:"));
 
     cluster.shutdown();
   }
TestDFSUtil.java

@@ -541,7 +541,7 @@ public class TestDFSUtil {
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn1"),
         NS2_NN1_HOST);
     conf.set(DFSUtil.addKeySuffixes(
         DFS_NAMENODE_RPC_ADDRESS_KEY, "ns2", "ns2-nn2"),
         NS2_NN2_HOST);
 
     Map<String, Map<String, InetSocketAddress>> map =

@@ -550,17 +550,21 @@ public class TestDFSUtil {
     assertTrue(HAUtil.isHAEnabled(conf, "ns1"));
     assertTrue(HAUtil.isHAEnabled(conf, "ns2"));
     assertFalse(HAUtil.isHAEnabled(conf, "ns3"));
 
-    assertEquals(NS1_NN1_HOST, map.get("ns1").get("ns1-nn1").toString());
-    assertEquals(NS1_NN2_HOST, map.get("ns1").get("ns1-nn2").toString());
-    assertEquals(NS2_NN1_HOST, map.get("ns2").get("ns2-nn1").toString());
-    assertEquals(NS2_NN2_HOST, map.get("ns2").get("ns2-nn2").toString());
+    assertEquals(NS1_NN1_HOST,
+        NetUtils.getHostPortString(map.get("ns1").get("ns1-nn1")));
+    assertEquals(NS1_NN2_HOST,
+        NetUtils.getHostPortString(map.get("ns1").get("ns1-nn2")));
+    assertEquals(NS2_NN1_HOST,
+        NetUtils.getHostPortString(map.get("ns2").get("ns2-nn1")));
+    assertEquals(NS2_NN2_HOST,
+        NetUtils.getHostPortString(map.get("ns2").get("ns2-nn2")));
 
     assertEquals(NS1_NN1_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn1"));
     assertEquals(NS1_NN2_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns1", "ns1-nn2"));
     assertEquals(NS2_NN1_HOST,
         DFSUtil.getNamenodeServiceAddr(conf, "ns2", "ns2-nn1"));
 
     // No nameservice was given and we can't determine which service addr

@@ -630,8 +634,29 @@ public class TestDFSUtil {
     Map<String, Map<String, InetSocketAddress>> map =
         DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
 
-    assertEquals(NS1_NN1_ADDR, map.get("ns1").get("nn1").toString());
-    assertEquals(NS1_NN2_ADDR, map.get("ns1").get("nn2").toString());
+    assertEquals(NS1_NN1_ADDR,
+        NetUtils.getHostPortString(map.get("ns1").get("nn1")));
+    assertEquals(NS1_NN2_ADDR,
+        NetUtils.getHostPortString(map.get("ns1").get("nn2")));
+  }
+
+  @Test
+  public void testIPv6GetHaNnHttpAddresses() throws IOException {
+    final String logicalHostName = "ns1";
+    final String ns1Nn1Addr = "[0:0:0:0:0:b00c:c0a8:12a]:8020";
+    final String ns1Nn2Addr = "[::face:a0b:182a]:8020";
+
+    Configuration conf =
+        createWebHDFSHAConfiguration(logicalHostName, ns1Nn1Addr,
+            ns1Nn2Addr);
+
+    Map<String, Map<String, InetSocketAddress>> map =
+        DFSUtilClient.getHaNnWebHdfsAddresses(conf, "webhdfs");
+
+    assertEquals(ns1Nn1Addr,
+        NetUtils.getHostPortString(map.get("ns1").get("nn1")));
+    assertEquals(ns1Nn2Addr.replace("::", "0:0:0:0:0:"),
+        NetUtils.getHostPortString(map.get("ns1").get("nn2")));
   }
 
   private static Configuration createWebHDFSHAConfiguration(String logicalHostName, String nnaddr1, String nnaddr2) {
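The `replace("::", "0:0:0:0:0:")` in the new test mirrors how the JDK canonicalizes IPv6 literals once they round-trip through an `InetSocketAddress`; a plain-Java check:

```java
import java.net.InetAddress;
import java.net.UnknownHostException;

class Ipv6CanonicalForm {
  public static void main(String[] args) throws UnknownHostException {
    // The JDK reports IPv6 literals in uncompressed form:
    System.out.println(InetAddress.getByName("::face:a0b:182a").getHostAddress());
    // -> 0:0:0:0:0:face:a0b:182a
    System.out.println("[::face:a0b:182a]:8020".replace("::", "0:0:0:0:0:"));
    // -> [0:0:0:0:0:face:a0b:182a]:8020, the form the assertion expects
  }
}
```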
TestFileAppend.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetUtil;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Time;

@@ -554,7 +555,8 @@ public class TestFileAppend{
 
     // stop one datanode
     DataNodeProperties dnProp = cluster.stopDataNode(0);
-    String dnAddress = dnProp.datanode.getXferAddress().toString();
+    String dnAddress = NetUtils.getSocketAddressString(
+        dnProp.datanode.getXferAddress());
     if (dnAddress.startsWith("/")) {
       dnAddress = dnAddress.substring(1);
     }

@@ -609,7 +611,8 @@ public class TestFileAppend{
 
     // stop one datanode
     DataNodeProperties dnProp = cluster.stopDataNode(0);
-    String dnAddress = dnProp.datanode.getXferAddress().toString();
+    String dnAddress = NetUtils
+        .getSocketAddressString(dnProp.datanode.getXferAddress());
     if (dnAddress.startsWith("/")) {
       dnAddress = dnAddress.substring(1);
     }
TestFileCreation.java

@@ -92,11 +92,14 @@ import org.apache.hadoop.util.Time;
 import org.junit.Assert;
 import org.junit.Test;
 import org.slf4j.event.Level;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 
 /**
  * This class tests various cases during file creation.
  */
 public class TestFileCreation {
+  public static final Log LOG = LogFactory.getLog(TestFileCreation.class);
   static final String DIR = "/" + TestFileCreation.class.getSimpleName() + "/";
 
   {

@@ -125,7 +128,7 @@ public class TestFileCreation {
   // creates a file but does not close it
   public static FSDataOutputStream createFile(FileSystem fileSys, Path name, int repl)
       throws IOException {
-    System.out.println("createFile: Created " + name + " with " + repl + " replica.");
+    LOG.info("createFile: Created " + name + " with " + repl + " replica.");
     FSDataOutputStream stm = fileSys.create(name, true, fileSys.getConf()
         .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096),
         (short) repl, blockSize);

@@ -305,8 +308,8 @@ public class TestFileCreation {
   public void testFileCreationSetLocalInterface() throws IOException {
     assumeTrue(System.getProperty("os.name").startsWith("Linux"));
 
-    // The mini cluster listens on the loopback so we can use it here
-    checkFileCreation("lo", false);
+    // Use wildcard address to force interface to be used
+    checkFileCreation("0.0.0.0", false);
 
     try {
       checkFileCreation("bogus-interface", false);

@@ -348,9 +351,9 @@ public class TestFileCreation {
       // check that / exists
       //
       Path path = new Path("/");
-      System.out.println("Path : \"" + path.toString() + "\"");
-      System.out.println(fs.getFileStatus(path).isDirectory());
+      LOG.info("Path : \"" + path.toString() + "\"");
+      LOG.info(fs.getFileStatus(path).isDirectory());
       assertTrue("/ should be a directory",
           fs.getFileStatus(path).isDirectory());
 
       //

@@ -358,8 +361,8 @@ public class TestFileCreation {
       //
       Path dir1 = new Path("/test_dir");
       fs.mkdirs(dir1);
-      System.out.println("createFile: Creating " + dir1.getName() +
-          " for overwrite of existing directory.");
+      LOG.info("createFile: Creating " + dir1.getName()
+          + " for overwrite of existing directory.");
       try {
         fs.create(dir1, true); // Create path, overwrite=true
         fs.close();

@@ -379,9 +382,9 @@ public class TestFileCreation {
       FSDataOutputStream stm = createFile(fs, file1, 1);
 
       // verify that file exists in FS namespace
       assertTrue(file1 + " should be a file",
           fs.getFileStatus(file1).isFile());
-      System.out.println("Path : \"" + file1 + "\"");
+      LOG.info("Path : \"" + file1 + "\"");
 
       // write to file
       writeFile(stm);

@@ -393,13 +396,13 @@ public class TestFileCreation {
       assertTrue(file1 + " should be of size " + fileSize +
           " but found to be of size " + len,
           len == fileSize);
 
       // verify the disk space the file occupied
       long diskSpace = dfs.getContentSummary(file1.getParent()).getLength();
       assertEquals(file1 + " should take " + fileSize + " bytes disk space " +
           "but found to take " + diskSpace + " bytes", fileSize, diskSpace);
 
       // Check storage usage
       // can't check capacities for real storage since the OS file system may be changing under us.
       if (simulatedStorage) {
         DataNode dn = cluster.getDataNodes().get(0);

@@ -436,7 +439,7 @@ public class TestFileCreation {
       FSDataOutputStream stm1 = createFile(fs, file1, 1);
       FSDataOutputStream stm2 = createFile(fs, file2, 1);
       FSDataOutputStream stm3 = createFile(localfs, file3, 1);
-      System.out.println("DeleteOnExit: Created files.");
+      LOG.info("DeleteOnExit: Created files.");
 
       // write to files and close. Purposely, do not close file2.
       writeFile(stm1);

@@ -467,7 +470,7 @@ public class TestFileCreation {
           !fs.exists(file2));
       assertTrue(file3 + " still exists inspite of deletOnExit set.",
           !localfs.exists(file3));
-      System.out.println("DeleteOnExit successful.");
+      LOG.info("DeleteOnExit successful.");
 
     } finally {
       IOUtils.closeStream(fs);

@@ -563,7 +566,7 @@ public class TestFileCreation {
       // verify that file exists in FS namespace
       assertTrue(file1 + " should be a file",
           fs.getFileStatus(file1).isFile());
-      System.out.println("Path : \"" + file1 + "\"");
+      LOG.info("Path : \"" + file1 + "\"");
 
       // kill the datanode
       cluster.shutdownDataNodes();

@@ -575,7 +578,7 @@ public class TestFileCreation {
         if (info.length == 0) {
           break;
         }
-        System.out.println("testFileCreationError1: waiting for datanode " +
+        LOG.info("testFileCreationError1: waiting for datanode " +
            " to die.");
        try {
          Thread.sleep(1000);

@@ -597,7 +600,7 @@ public class TestFileCreation {
      // bad block allocations were cleaned up earlier.
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
          file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up",
          locations.locatedBlockCount() == 0);
    } finally {

@@ -613,7 +616,7 @@ public class TestFileCreation {
  @Test
  public void testFileCreationError2() throws IOException {
    long leasePeriod = 1000;
-    System.out.println("testFileCreationError2 start");
+    LOG.info("testFileCreationError2 start");
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
    conf.setInt(DFS_HEARTBEAT_INTERVAL_KEY, 1);

@@ -632,24 +635,24 @@ public class TestFileCreation {
      //
      Path file1 = new Path("/filestatus.dat");
      createFile(dfs, file1, 1);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
          + "Created file filestatus.dat with one replicas.");
 
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
          file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
          + "The file has " + locations.locatedBlockCount() + " blocks.");
 
      // add one block to the file
      LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
          client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
          + "Added block " + location.getBlock());
 
      locations = client.getNamenode().getBlockLocations(file1.toString(),
          0, Long.MAX_VALUE);
      int count = locations.locatedBlockCount();
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
          + "The file now has " + count + " blocks.");
 
      // set the soft and hard limit to be 1 second so that the

@@ -665,10 +668,10 @@ public class TestFileCreation {
      // verify that the last block was synchronized.
      locations = client.getNamenode().getBlockLocations(file1.toString(),
          0, Long.MAX_VALUE);
-      System.out.println("testFileCreationError2: "
+      LOG.info("testFileCreationError2: "
          + "locations = " + locations.locatedBlockCount());
      assertEquals(0, locations.locatedBlockCount());
-      System.out.println("testFileCreationError2 successful");
+      LOG.info("testFileCreationError2 successful");
    } finally {
      IOUtils.closeStream(dfs);
      cluster.shutdown();

@@ -678,7 +681,7 @@ public class TestFileCreation {
  /** test addBlock(..) when replication<min and excludeNodes==null. */
  @Test
  public void testFileCreationError3() throws IOException {
-    System.out.println("testFileCreationError3 start");
+    LOG.info("testFileCreationError3 start");
    Configuration conf = new HdfsConfiguration();
    // create cluster
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();

@@ -699,7 +702,7 @@ public class TestFileCreation {
        FileSystem.LOG.info("GOOD!", ioe);
      }
 
-      System.out.println("testFileCreationError3 successful");
+      LOG.info("testFileCreationError3 successful");
    } finally {
      IOUtils.closeStream(dfs);
      cluster.shutdown();

@@ -732,7 +735,7 @@ public class TestFileCreation {
      // create a new file.
      Path file1 = new Path("/filestatus.dat");
      HdfsDataOutputStream stm = create(fs, file1, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
          + "Created file " + file1);
      assertEquals(file1 + " should be replicated to 1 datanode.", 1,
          stm.getCurrentBlockReplication());

@@ -746,7 +749,7 @@ public class TestFileCreation {
      // rename file wile keeping it open.
      Path fileRenamed = new Path("/filestatusRenamed.dat");
      fs.rename(file1, fileRenamed);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
          + "Renamed file " + file1 + " to " +
          fileRenamed);
      file1 = fileRenamed;

@@ -755,7 +758,7 @@ public class TestFileCreation {
      //
      Path file2 = new Path("/filestatus2.dat");
      FSDataOutputStream stm2 = createFile(fs, file2, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
          + "Created file " + file2);
 
      // create yet another new file with full path name.

@@ -763,21 +766,21 @@ public class TestFileCreation {
      //
      Path file3 = new Path("/user/home/fullpath.dat");
      FSDataOutputStream stm3 = createFile(fs, file3, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
          + "Created file " + file3);
      Path file4 = new Path("/user/home/fullpath4.dat");
      FSDataOutputStream stm4 = createFile(fs, file4, 1);
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
          + "Created file " + file4);
 
      fs.mkdirs(new Path("/bin"));
      fs.rename(new Path("/user/home"), new Path("/bin"));
      Path file3new = new Path("/bin/home/fullpath.dat");
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
          + "Renamed file " + file3 + " to " +
          file3new);
      Path file4new = new Path("/bin/home/fullpath4.dat");
-      System.out.println("testFileCreationNamenodeRestart: "
+      LOG.info("testFileCreationNamenodeRestart: "
          + "Renamed file " + file4 + " to " +
          file4new);
 

@@ -837,14 +840,14 @@ public class TestFileCreation {
      DFSClient client = fs.dfs;
      LocatedBlocks locations = client.getNamenode().getBlockLocations(
          file1.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file1,
          locations.locatedBlockCount() == 3);
 
      // verify filestatus2.dat
      locations = client.getNamenode().getBlockLocations(
          file2.toString(), 0, Long.MAX_VALUE);
-      System.out.println("locations = " + locations.locatedBlockCount());
+      LOG.info("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file2,
          locations.locatedBlockCount() == 1);
    } finally {

@@ -859,7 +862,7 @@ public class TestFileCreation {
  @Test
  public void testDFSClientDeath() throws IOException, InterruptedException {
    Configuration conf = new HdfsConfiguration();
-    System.out.println("Testing adbornal client death.");
+    LOG.info("Testing adbornal client death.");
    if (simulatedStorage) {
      SimulatedFSDataset.setFactory(conf);
    }

@@ -873,7 +876,7 @@ public class TestFileCreation {
      //
      Path file1 = new Path("/clienttest.dat");
      FSDataOutputStream stm = createFile(fs, file1, 1);
-      System.out.println("Created file clienttest.dat");
+      LOG.info("Created file clienttest.dat");
 
      // write to file
      writeFile(stm);

@@ -889,7 +892,7 @@ public class TestFileCreation {
      cluster.shutdown();
    }
  }
 
  /**
   * Test file creation using createNonRecursive().
   */

@@ -970,7 +973,7 @@ public class TestFileCreation {
  static IOException createNonRecursive(FileSystem fs, Path name,
      int repl, EnumSet<CreateFlag> flag) throws IOException {
    try {
-      System.out.println("createNonRecursive: Attempting to create " + name +
+      LOG.info("createNonRecursive: Attempting to create " + name +
          " with " + repl + " replica.");
      int bufferSize = fs.getConf()
          .getInt(CommonConfigurationKeys.IO_FILE_BUFFER_SIZE_KEY, 4096);

@@ -1004,9 +1007,9 @@ public class TestFileCreation {
 
    try {
      FileSystem fs = cluster.getFileSystem();
 
      Path[] p = {new Path("/foo"), new Path("/bar")};
 
      //write 2 files at the same time
      FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
      int i = 0;

@@ -1038,9 +1041,9 @@ public class TestFileCreation {
 
    try {
      FileSystem fs = cluster.getFileSystem();
 
      Path[] p = {new Path("/foo"), new Path("/bar")};
 
      //write 2 files at the same time
      FSDataOutputStream[] out = {fs.create(p[0]), fs.create(p[1])};
      int i = 0;

@@ -1068,7 +1071,7 @@ public class TestFileCreation {
   */
  @Test
  public void testLeaseExpireHardLimit() throws Exception {
-    System.out.println("testLeaseExpireHardLimit start");
+    LOG.info("testLeaseExpireHardLimit start");
    final long leasePeriod = 1000;
    final int DATANODE_NUM = 3;
 

@@ -1113,20 +1116,20 @@ public class TestFileCreation {
          successcount++;
        }
      }
-      System.out.println("successcount=" + successcount);
+      LOG.info("successcount=" + successcount);
      assertTrue(successcount > 0);
    } finally {
      IOUtils.closeStream(dfs);
      cluster.shutdown();
    }
 
-    System.out.println("testLeaseExpireHardLimit successful");
+    LOG.info("testLeaseExpireHardLimit successful");
  }
 
  // test closing file system before all file handles are closed.
  @Test
  public void testFsClose() throws Exception {
-    System.out.println("test file system close start");
+    LOG.info("test file system close start");
    final int DATANODE_NUM = 3;
 
    Configuration conf = new HdfsConfiguration();

@@ -1147,7 +1150,7 @@ public class TestFileCreation {
      // close file system without closing file
      dfs.close();
    } finally {
-      System.out.println("testFsClose successful");
+      LOG.info("testFsClose successful");
      cluster.shutdown();
    }
  }

@@ -1155,7 +1158,7 @@ public class TestFileCreation {
  // test closing file after cluster is shutdown
  @Test
  public void testFsCloseAfterClusterShutdown() throws IOException {
-    System.out.println("test testFsCloseAfterClusterShutdown start");
+    LOG.info("test testFsCloseAfterClusterShutdown start");
    final int DATANODE_NUM = 3;
 
    Configuration conf = new HdfsConfiguration();

@@ -1186,13 +1189,13 @@ public class TestFileCreation {
      boolean hasException = false;
      try {
        out.close();
-        System.out.println("testFsCloseAfterClusterShutdown: Error here");
+        LOG.info("testFsCloseAfterClusterShutdown: Error here");
      } catch (IOException e) {
        hasException = true;
      }
      assertTrue("Failed to close file after cluster shutdown", hasException);
    } finally {
-      System.out.println("testFsCloseAfterClusterShutdown successful");
+      LOG.info("testFsCloseAfterClusterShutdown successful");
      if (cluster != null) {
        cluster.shutdown();
      }

@@ -1211,7 +1214,7 @@ public class TestFileCreation {
  public void testCreateNonCanonicalPathAndRestartRpc() throws Exception {
    doCreateTest(CreationMethod.DIRECT_NN_RPC);
  }
 
  /**
   * Another regression test for HDFS-3626. This one creates files using
   * a Path instantiated from a string object.

@@ -1231,7 +1234,7 @@ public class TestFileCreation {
      throws Exception {
    doCreateTest(CreationMethod.PATH_FROM_URI);
  }
 
  private enum CreationMethod {
    DIRECT_NN_RPC,
    PATH_FROM_URI,

@@ -1246,7 +1249,7 @@ public class TestFileCreation {
      NamenodeProtocols nnrpc = cluster.getNameNodeRpc();
 
      for (String pathStr : NON_CANONICAL_PATHS) {
-        System.out.println("Creating " + pathStr + " by " + method);
+        LOG.info("Creating " + pathStr + " by " + method);
        switch (method) {
          case DIRECT_NN_RPC:
            try {

@@ -1261,7 +1264,7 @@ public class TestFileCreation {
            // So, we expect all of them to fail.
            }
            break;
 
          case PATH_FROM_URI:
          case PATH_FROM_STRING:
            // Unlike the above direct-to-NN case, we expect these to succeed,

@@ -1279,7 +1282,7 @@ public class TestFileCreation {
            throw new AssertionError("bad method: " + method);
        }
      }
 
      cluster.restartNameNode();
 
    } finally {

@@ -1336,7 +1339,7 @@ public class TestFileCreation {
    dfs.mkdirs(new Path("/foo/dir"));
    String file = "/foo/dir/file";
    Path filePath = new Path(file);
 
    // Case 1: Create file with overwrite, check the blocks of old file
    // are cleaned after creating with overwrite
    NameNode nn = cluster.getNameNode();

@@ -1350,7 +1353,7 @@ public class TestFileCreation {
    } finally {
      out.close();
    }
 
    LocatedBlocks oldBlocks = NameNodeAdapter.getBlockLocations(
        nn, file, 0, fileSize);
    assertBlocks(bm, oldBlocks, true);

@@ -1363,7 +1366,7 @@ public class TestFileCreation {
      out.close();
    }
    dfs.deleteOnExit(filePath);
 
    LocatedBlocks newBlocks = NameNodeAdapter.getBlockLocations(
        nn, file, 0, fileSize);
    assertBlocks(bm, newBlocks, true);

@@ -1377,7 +1380,7 @@ public class TestFileCreation {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
 
    // Case 2: Restart NN, check the file
    cluster.restartNameNode();
    nn = cluster.getNameNode();

@@ -1388,13 +1391,13 @@ public class TestFileCreation {
      in.close();
    }
    Assert.assertArrayEquals(newData, result);
 
    // Case 3: Save new checkpoint and restart NN, check the file
    NameNodeAdapter.enterSafeMode(nn, false);
    NameNodeAdapter.saveNamespace(nn);
    cluster.restartNameNode();
    nn = cluster.getNameNode();
 
    in = dfs.open(filePath);
    try {
      result = readAll(in);

@@ -1411,8 +1414,8 @@ public class TestFileCreation {
        }
      }
    }
 
  private void assertBlocks(BlockManager bm, LocatedBlocks lbs,
      boolean exist) {
    for (LocatedBlock locatedBlock : lbs.getLocatedBlocks()) {
      if (exist) {

@@ -1424,7 +1427,7 @@ public class TestFileCreation {
      }
    }
  }
 
  private byte[] readAll(FSDataInputStream in) throws IOException {
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    byte[] buffer = new byte[1024];
@@ -195,7 +195,8 @@ public class BlockReaderTestUtil {
    return new BlockReaderFactory(fs.getClient().getConf()).
      setInetSocketAddress(targetAddr).
      setBlock(block).
-      setFileName(targetAddr.toString()+ ":" + block.getBlockId()).
+      setFileName(NetUtils.getSocketAddressString(targetAddr) + ":" + block
+          .getBlockId()).
      setBlockToken(testBlock.getBlockToken()).
      setStartOffset(offset).
      setLength(lenToRead).
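A note on the setFileName() change above: on the JDK 8 line this branch targets, InetSocketAddress.toString() renders IPv6 endpoints without URI-style brackets, so appending another ":" + id produces ambiguous text. The sketch below is illustrative only, not patch code, and the exact output format varies by JDK release.

    import java.net.InetSocketAddress;

    public class SocketAddressText {
      public static void main(String[] args) {
        // IPv4: the port is clearly delimited.
        System.out.println(new InetSocketAddress("127.0.0.1", 50010));
        // -> /127.0.0.1:50010

        // IPv6 on JDK 8: no brackets, so ":50010" blends into the address
        // text, which is why the patch routes the file name through
        // NetUtils.getSocketAddressString() instead.
        System.out.println(new InetSocketAddress("2001:db8::1", 50010));
        // -> /2001:db8::1:50010
      }
    }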
@@ -921,14 +921,14 @@ public class TestQuorumJournalManager {
    GenericTestUtils.assertGlobEquals(paxosDir, "\\d+",
        "3");
  }

  @Test
  public void testToString() throws Exception {
    GenericTestUtils.assertMatches(
        qjm.toString(),
-        "QJM to \\[127.0.0.1:\\d+, 127.0.0.1:\\d+, 127.0.0.1:\\d+\\]");
+        "QJM to \\[localhost:\\d+, localhost:\\d+, localhost:\\d+\\]");
  }

  @Test
  public void testSelectInputStreamsNotOnBoundary() throws Exception {
    final int txIdsPerSegment = 10;
@@ -110,13 +110,19 @@ public class TestHostFileManager {
    includedNodes.add(entry("127.0.0.1:12345"));
    includedNodes.add(entry("localhost:12345"));
    includedNodes.add(entry("127.0.0.1:12345"));

+    includedNodes.add(entry("[::1]:42"));
+    includedNodes.add(entry("[0:0:0:0:0:0:0:1]:42"));
+    includedNodes.add(entry("[::1]:42"));
+
    includedNodes.add(entry("127.0.0.2"));

    excludedNodes.add(entry("127.0.0.1:12346"));
    excludedNodes.add(entry("127.0.30.1:12346"));
+    excludedNodes.add(entry("[::1]:24"));

-    Assert.assertEquals(2, includedNodes.size());
-    Assert.assertEquals(2, excludedNodes.size());
+    Assert.assertEquals(3, includedNodes.size());
+    Assert.assertEquals(3, excludedNodes.size());

    hm.refresh(includedNodes, excludedNodes);

@@ -125,20 +131,33 @@ public class TestHostFileManager {
    Map<String, DatanodeDescriptor> dnMap = (Map<String,
        DatanodeDescriptor>) Whitebox.getInternalState(dm, "datanodeMap");

-    // After the de-duplication, there should be only one DN from the included
+    // After the de-duplication, there should be three DN from the included
    // nodes declared as dead.
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
-        .DatanodeReportType.ALL).size());
-    Assert.assertEquals(2, dm.getDatanodeListForReport(HdfsConstants
-        .DatanodeReportType.DEAD).size());
-    dnMap.put("uuid-foo", new DatanodeDescriptor(new DatanodeID("127.0.0.1",
-        "localhost", "uuid-foo", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(1, dm.getDatanodeListForReport(HdfsConstants
-        .DatanodeReportType.DEAD).size());
-    dnMap.put("uuid-bar", new DatanodeDescriptor(new DatanodeID("127.0.0.2",
-        "127.0.0.2", "uuid-bar", 12345, 1020, 1021, 1022)));
-    Assert.assertEquals(0, dm.getDatanodeListForReport(HdfsConstants
-        .DatanodeReportType.DEAD).size());
+    Assert.assertEquals(3,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.ALL)
+            .size());
+    Assert.assertEquals(3,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
+            .size());
+    dnMap.put("uuid-foo", new DatanodeDescriptor(
+        new DatanodeID("127.0.0.1", "localhost", "uuid-foo", 12345, 1020, 1021,
+            1022)));
+    Assert.assertEquals(2,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
+            .size());
+    dnMap.put("uuid-bar", new DatanodeDescriptor(
+        new DatanodeID("127.0.0.2", "127.0.0.2", "uuid-bar", 12345, 1020, 1021,
+            1022)));
+    Assert.assertEquals(1,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
+            .size());
+    dnMap.put("uuid-baz", new DatanodeDescriptor(
+        new DatanodeID("[::1]", "localhost", "uuid-baz", 42, 1020, 1021,
+            1022)));
+    Assert.assertEquals(0,
+        dm.getDatanodeListForReport(HdfsConstants.DatanodeReportType.DEAD)
+            .size());

    DatanodeDescriptor spam = new DatanodeDescriptor(new DatanodeID("127.0.0" +
        ".3", "127.0.0.3", "uuid-spam", 12345, 1020, 1021, 1022));
    DFSTestUtil.setDatanodeDead(spam);
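Why the expected include count moves from 2 to 3 rather than 4: the test's entry() helper de-duplicates by resolved address, and the two spellings of the IPv6 loopback parse to the same 16 bytes. A stand-alone sketch of that equality (illustrative only, not patch code):

    import java.net.InetAddress;

    public class LoopbackDedup {
      public static void main(String[] args) throws Exception {
        // Both literals parse to the same 16-byte address, so they are
        // equal and collapse into a single include entry.
        InetAddress shortForm = InetAddress.getByName("::1");
        InetAddress longForm = InetAddress.getByName("0:0:0:0:0:0:0:1");
        System.out.println(shortForm.equals(longForm)); // true
      }
    }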
@@ -22,6 +22,8 @@ import org.junit.Test;
import org.mockito.Mockito;

import java.io.File;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
import java.util.Random;
import java.util.UUID;
import org.slf4j.Logger;

@@ -56,13 +58,37 @@ public class TestBlockPoolSliceStorage {
    }
  }

-  private String makeRandomIpAddress() {
+  private String makeRandomIpv4Address() {
    return rand.nextInt(256) + "." +
        rand.nextInt(256) + "." +
        rand.nextInt(256) + "." +
        rand.nextInt(256);
  }

+  private String makeRandomIpv6Address() {
+    byte[] bytes = new byte[16];
+    rand.nextBytes(bytes);
+    InetAddress adr = null;
+    try {
+      adr = InetAddress.getByAddress("unused", bytes);
+    } catch (UnknownHostException uhe) {
+      // Should never happen
+      LOG.error("UnknownHostException " + uhe);
+      assertThat(true, is(false));
+    }
+    String addrString = adr.getHostAddress().replaceAll(":", ".");
+
+    return "[" + addrString + "]";
+  }
+
+  private String makeRandomIpAddress() {
+    if (rand.nextBoolean()) {
+      return makeRandomIpv4Address();
+    } else {
+      return makeRandomIpv6Address();
+    }
+  }
+
  private String makeRandomBlockpoolId() {
    return "BP-" + rand.nextInt(Integer.MAX_VALUE) +
        "-" + makeRandomIpAddress() +
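The new makeRandomIpv6Address() helper leans on the fact that any 16 random bytes form a syntactically valid IPv6 address, so the UnknownHostException branch is effectively unreachable. A stripped-down sketch of the same idea (illustrative only):

    import java.net.InetAddress;
    import java.util.Random;

    public class RandomV6 {
      public static void main(String[] args) throws Exception {
        byte[] bytes = new byte[16];
        new Random().nextBytes(bytes);
        // getByAddress() only throws when the byte length is illegal,
        // and 16 bytes is always a legal IPv6 address.
        InetAddress adr = InetAddress.getByAddress("unused", bytes);
        System.out.println("[" + adr.getHostAddress() + "]");
      }
    }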
@@ -151,14 +151,15 @@ public class TestHostsFiles {

    HostsFileWriter hostsFileWriter = new HostsFileWriter();
    hostsFileWriter.initialize(conf, "temp/decommission");
-    hostsFileWriter.initIncludeHosts(new String[]
-        {"localhost:52","127.0.0.1:7777"});
+    hostsFileWriter.initIncludeHosts(
+        new String[] {"localhost:52", "127.0.0.1:7777", "[::1]:42",
+            "[0:0:0:0:0:0:0:1]:24"});

    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      final FSNamesystem ns = cluster.getNameNode().getNamesystem();
-      assertTrue(ns.getNumDeadDataNodes() == 2);
+      assertTrue(ns.getNumDeadDataNodes() == 4);
      assertTrue(ns.getNumLiveDataNodes() == 0);

      // Testing using MBeans
@@ -166,7 +167,7 @@ public class TestHostsFiles {
      ObjectName mxbeanName = new ObjectName(
          "Hadoop:service=NameNode,name=FSNamesystemState");
      String nodes = mbs.getAttribute(mxbeanName, "NumDeadDataNodes") + "";
-      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 2);
+      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumDeadDataNodes") == 4);
      assertTrue((Integer) mbs.getAttribute(mxbeanName, "NumLiveDataNodes") == 0);
    } finally {
      if (cluster != null) {
@@ -20,8 +20,10 @@ package org.apache.hadoop.hdfs.server.namenode;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertThat;
import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.AnyOf.anyOf;
import static org.hamcrest.core.IsNot.not;

+import org.apache.hadoop.net.NetUtils;
import org.junit.Test;

import org.apache.hadoop.fs.FileUtil;
@@ -30,6 +32,8 @@ import org.apache.hadoop.http.HttpConfig;
import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
import org.apache.hadoop.test.GenericTestUtils;

+import java.net.InetAddress;
+import java.net.Inet6Address;
import java.io.File;
import java.io.IOException;

@@ -55,6 +59,7 @@ public class TestNameNodeRespectsBindHostKeys {
  public static final Logger LOG =
      LoggerFactory.getLogger(TestNameNodeRespectsBindHostKeys.class);
  private static final String WILDCARD_ADDRESS = "0.0.0.0";
+  private static final String IPV6_WILDCARD_ADDRESS = "0:0:0:0:0:0:0:0";
  private static final String LOCALHOST_SERVER_ADDRESS = "127.0.0.1:0";
  private static String keystoresDir;
  private static String sslConfDir;
@@ -79,9 +84,9 @@ public class TestNameNodeRespectsBindHostKeys {
  public void testRpcBindHostKey() throws IOException {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = null;

    LOG.info("Testing without " + DFS_NAMENODE_RPC_BIND_HOST_KEY);

    // NN should not bind the wildcard address by default.
    try {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
@@ -97,7 +102,7 @@ public class TestNameNodeRespectsBindHostKeys {
    }

    LOG.info("Testing with " + DFS_NAMENODE_RPC_BIND_HOST_KEY);

    // Tell NN to bind the wildcard address.
    conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);

@@ -106,13 +111,36 @@ public class TestNameNodeRespectsBindHostKeys {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
      String address = getRpcServerAddress(cluster);
-      assertThat("Bind address " + address + " is not wildcard.",
-          address, is("/" + WILDCARD_ADDRESS));
+      assertThat("Bind address " + address + " is not wildcard.", address,
+          anyOf(is("/" + WILDCARD_ADDRESS), is("/" + IPV6_WILDCARD_ADDRESS)));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }

+    InetAddress localAddr = InetAddress.getLocalHost();
+    if (localAddr instanceof Inet6Address) {
+      // Tell NN to bind the IPv6 wildcard address.
+      conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, IPV6_WILDCARD_ADDRESS);
+
+      // Verify that NN binds wildcard address now.
+      try {
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+        cluster.waitActive();
+        String address = getRpcServerAddress(cluster);
+        assertThat("Bind address " + address + " is not wildcard.",
+            address, anyOf(
+                is("/" + WILDCARD_ADDRESS),
+                is("/" + IPV6_WILDCARD_ADDRESS)));
+      } finally {
+        if (cluster != null) {
+          cluster.shutdown();
+        }
+      }
+    } else {
+      LOG.info("Not testing IPv6 binding as IPv6 is not supported");
+    }
  }

  @Test (timeout=300000)
@@ -121,7 +149,7 @@ public class TestNameNodeRespectsBindHostKeys {
    MiniDFSCluster cluster = null;

    LOG.info("Testing without " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);

    conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);

    // NN should not bind the wildcard address by default.
@@ -140,6 +168,27 @@ public class TestNameNodeRespectsBindHostKeys {

    LOG.info("Testing with " + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY);

+    InetAddress localAddr = InetAddress.getLocalHost();
+    if (localAddr instanceof Inet6Address) {
+      // Tell NN to bind the IPv6 wildcard address.
+      conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, IPV6_WILDCARD_ADDRESS);
+
+      // Verify that NN binds wildcard address now.
+      try {
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+        cluster.waitActive();
+        String address = getRpcServerAddress(cluster);
+        assertThat("Bind address " + address + " is not wildcard.", address,
+            anyOf(is("/" + WILDCARD_ADDRESS), is("/" + IPV6_WILDCARD_ADDRESS)));
+      } finally {
+        if (cluster != null) {
+          cluster.shutdown();
+        }
+      }
+    } else {
+      LOG.info("Not testing IPv6 binding as IPv6 is not supported");
+    }
+
    // Tell NN to bind the wildcard address.
    conf.set(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, WILDCARD_ADDRESS);

@@ -148,8 +197,8 @@ public class TestNameNodeRespectsBindHostKeys {
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
      String address = getServiceRpcServerAddress(cluster);
-      assertThat("Bind address " + address + " is not wildcard.",
-          address, is("/" + WILDCARD_ADDRESS));
+      assertThat("Bind address " + address + " is not wildcard.", address,
+          anyOf(is("/" + WILDCARD_ADDRESS), is("/" + IPV6_WILDCARD_ADDRESS)));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
@@ -211,7 +260,8 @@ public class TestNameNodeRespectsBindHostKeys {
      conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
-      String address = cluster.getNameNode().getHttpAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpAddress());
      assertFalse("HTTP Bind address not expected to be wildcard by default.",
          address.startsWith(WILDCARD_ADDRESS));
    } finally {
@@ -231,7 +281,8 @@ public class TestNameNodeRespectsBindHostKeys {
      conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
-      String address = cluster.getNameNode().getHttpAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpAddress());
      assertTrue("HTTP Bind address " + address + " is not wildcard.",
          address.startsWith(WILDCARD_ADDRESS));
    } finally {
@@ -285,7 +336,8 @@ public class TestNameNodeRespectsBindHostKeys {
      conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
-      String address = cluster.getNameNode().getHttpsAddress().toString();
+      String address = NetUtils.getSocketAddressString(
+          cluster.getNameNode().getHttpsAddress());
      assertFalse("HTTP Bind address not expected to be wildcard by default.",
          address.startsWith(WILDCARD_ADDRESS));
    } finally {
@@ -305,7 +357,8 @@ public class TestNameNodeRespectsBindHostKeys {
      conf.set(DFS_NAMENODE_HTTPS_ADDRESS_KEY, LOCALHOST_SERVER_ADDRESS);
      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
      cluster.waitActive();
-      String address = cluster.getNameNode().getHttpsAddress().toString();
+      String address = NetUtils
+          .getSocketAddressString(cluster.getNameNode().getHttpsAddress());
      assertTrue("HTTP Bind address " + address + " is not wildcard.",
          address.startsWith(WILDCARD_ADDRESS));
    } finally {
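The reason the bind-host assertions now accept two spellings: the "any" address renders differently depending on which stack the listener lands on. A minimal illustration (not patch code; output shown for a typical JDK 8 setup):

    import java.net.InetSocketAddress;

    public class WildcardForms {
      public static void main(String[] args) {
        // IPv4 wildcard, asserted via WILDCARD_ADDRESS.
        System.out.println(new InetSocketAddress("0.0.0.0", 0).getAddress());
        // -> /0.0.0.0

        // IPv6 wildcard, asserted via IPV6_WILDCARD_ADDRESS.
        System.out.println(
            new InetSocketAddress("0:0:0:0:0:0:0:0", 0).getAddress());
        // -> /0:0:0:0:0:0:0:0
      }
    }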
@@ -25,7 +25,9 @@
package org.apache.hadoop.hdfs.server.namenode;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY;
-import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+import static org.hamcrest.core.Is.is;
+import static org.hamcrest.core.AnyOf.anyOf;

import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
@@ -43,14 +45,18 @@ public class TestNameNodeRpcServer {
    // The name node in MiniDFSCluster only binds to 127.0.0.1.
    // We can set the bind address to 0.0.0.0 to make it listen
    // to all interfaces.
+    // On IPv4-only machines it will return that it is listening on 0.0.0.0
+    // On dual-stack or IPv6-only machines it will return 0:0:0:0:0:0:0:0
    conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0");
    MiniDFSCluster cluster = null;

    try {
      cluster = new MiniDFSCluster.Builder(conf).build();
      cluster.waitActive();
-      assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc())
-          .getClientRpcServer().getListenerAddress().getHostName());
+      String listenerAddress = ((NameNodeRpcServer)cluster.getNameNodeRpc())
+          .getClientRpcServer().getListenerAddress().getHostName();
+      assertThat("Bind address " + listenerAddress + " is not wildcard.",
+          listenerAddress, anyOf(is("0.0.0.0"), is("0:0:0:0:0:0:0:0")));
    } finally {
      if (cluster != null) {
        cluster.shutdown();
@@ -141,11 +141,14 @@ public class HostsFileWriter {
          includeHosts.toString());
    } else {
      HashSet<DatanodeAdminProperties> allDNs = new HashSet<>();
-      for(String hostNameAndPort : hostNameAndPorts) {
-        String[] hostAndPort = hostNameAndPort.split(":");
+      for (String hostNameAndPort : hostNameAndPorts) {
+        int i = hostNameAndPort.lastIndexOf(':');
+        String port =
+            hostNameAndPort.substring(hostNameAndPort.lastIndexOf(":") + 1);
+        String addr = hostNameAndPort.substring(0, i);
        DatanodeAdminProperties dn = new DatanodeAdminProperties();
-        dn.setHostName(hostAndPort[0]);
-        dn.setPort(Integer.parseInt(hostAndPort[1]));
+        dn.setHostName(addr);
+        dn.setPort(Integer.parseInt(port));
        allDNs.add(dn);
      }
      CombinedHostsFileWriter.writeFile(combinedFile.toString(), allDNs);
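The lastIndexOf(':') rewrite above matters because an IPv6 literal contains colons itself, so split(":") no longer isolates the port. A self-contained sketch of the two behaviors (sample strings are illustrative):

    public class HostPortSplit {
      public static void main(String[] args) {
        for (String s : new String[] {"localhost:52", "[::1]:42"}) {
          int i = s.lastIndexOf(':');
          System.out.println(s.substring(0, i) + " / port " + s.substring(i + 1));
          // -> localhost / port 52
          // -> [::1] / port 42
        }
        // split(":") on "[::1]:42" yields {"[", "", "1]", "42"}, so the old
        // code would have taken "[" as the host and "" as the port.
      }
    }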
@@ -0,0 +1,205 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.util;
+
+import org.apache.hadoop.thirdparty.com.google.common.net.InetAddresses;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.net.Peer;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
+import org.apache.hadoop.net.unix.DomainSocket;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import java.net.Inet4Address;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.nio.channels.ReadableByteChannel;
+
+import static org.junit.Assert.*;
+
+/**
+ * This is a very basic, very fast test to test IPv6 parsing issues
+ * as we find them.
+ * It does NOT depend on having a working IPv6 stack and should
+ * succeed even if run with "-Djava.net.preferIPv4Stack=true".
+ */
+public class TestIPv6FormatCompatibility {
+  private static final String IPV6_LOOPBACK_LONG_STRING = "0:0:0:0:0:0:0:1";
+  private static final String IPV6_SAMPLE_ADDRESS =
+      "2a03:2880:2130:cf05:face:b00c:0:1";
+  private static final String IPV6_LOOPBACK_SHORT_STRING = "::1";
+  private static final String IPV4_LOOPBACK_WITH_PORT = "127.0.0.1:10";
+  private static final String IPV6_LOOPBACK_WITH_PORT =
+      "[" + IPV6_LOOPBACK_LONG_STRING + "]:10";
+  private static final String IPV6_SAMPLE_WITH_PORT =
+      "[" + IPV6_SAMPLE_ADDRESS + "]:10";
+  private static final InetAddress IPV6LOOPBACK =
+      InetAddresses.forString(IPV6_LOOPBACK_LONG_STRING);
+  private static final InetAddress IPV4LOOPBACK =
+      Inet4Address.getLoopbackAddress();
+  private static final InetAddress IPV6SAMPLE =
+      InetAddresses.forString(IPV6_SAMPLE_ADDRESS);
+  private static final String IPV4_LOOPBACK_STRING =
+      IPV4LOOPBACK.getHostAddress();
+
+  private static final Log LOG =
+      LogFactory.getLog(TestIPv6FormatCompatibility.class);
+
+  // HDFS-8078 : note that we're expecting URI-style
+  // (see Javadoc for java.net.URI or rfc2732)
+  @Test public void testDatanodeIDXferAddressAddsBrackets() {
+    DatanodeID ipv4localhost =
+        new DatanodeID(IPV4_LOOPBACK_STRING, "localhost", "no-uuid", 10, 20, 30,
+            40);
+    DatanodeID ipv6localhost =
+        new DatanodeID(IPV6_LOOPBACK_LONG_STRING, "localhost", "no-uuid", 10,
+            20, 30, 40);
+    DatanodeID ipv6sample =
+        new DatanodeID(IPV6_SAMPLE_ADDRESS, "ipv6.example.com", "no-uuid", 10,
+            20, 30, 40);
+    assertEquals("IPv6 should have brackets added", IPV6_LOOPBACK_WITH_PORT,
+        ipv6localhost.getXferAddr(false));
+    assertEquals("IPv6 should have brackets added", IPV6_SAMPLE_WITH_PORT,
+        ipv6sample.getXferAddr(false));
+    assertEquals("IPv4 should not have brackets added", IPV4_LOOPBACK_WITH_PORT,
+        ipv4localhost.getXferAddr(false));
+  }
+
+  // HDFS-8078
+  @Test
+  public void testDatanodeIDXferAddressShouldNormalizeIPv6() {
+    DatanodeID ipv6short =
+        new DatanodeID(IPV6_LOOPBACK_SHORT_STRING, "localhost", "no-uuid", 10,
+            20, 30, 40);
+    assertEquals("IPv6 should be normalized and not abbreviated",
+        IPV6_LOOPBACK_WITH_PORT, ipv6short.getXferAddr(false));
+  }
+
+  // HDFS-8078 : note that in some cases we're parsing the results of
+  // java.net.SocketAddress.toString()
+  // which doesn't produce the URI-style results, and we're splitting
+  // this rather than producing the combined string to be consumed.
+  @Test
+  public void testGetPeerShouldFindFullIPAddress() {
+    Peer ipv6SamplePeer = new MockInetPeer(IPV6SAMPLE, false);
+    Peer ipv4loopback = new MockInetPeer(IPV4LOOPBACK, false);
+    Peer ipv6loopback = new MockInetPeer(IPV6LOOPBACK, false);
+    assertNotNull(DataTransferSaslUtil.getPeerAddress(ipv6SamplePeer));
+    assertNotNull(DataTransferSaslUtil.getPeerAddress(ipv6loopback));
+    assertNotNull(DataTransferSaslUtil.getPeerAddress(ipv4loopback));
+  }
+
+  // HDFS-8078 : It looks like in some cases this could also produce URI-style
+  // results, so we test both.
+  @Test public void testGetPeerAccept() {
+    Peer ipv6loopbackAsURI = new MockInetPeer(IPV6LOOPBACK, true);
+    assertEquals("getPeer should still work with URI-style [bracket]",
+        IPV6_LOOPBACK_LONG_STRING,
+        DataTransferSaslUtil.getPeerAddress(ipv6loopbackAsURI)
+            .getHostAddress());
+  }
+
+  /**
+   * Mocks a Peer purely to test DataTransferSaslUtil.getPeerAddress() which
+   * takes a Peer and consumes getRemoteAddressString().
+   * All other functionality missing.
+   */
+  private class MockInetPeer implements Peer {
+    private SocketAddress sa;
+    private boolean asURI;
+
+    MockInetPeer(InetAddress addr, boolean asURI) {
+      sa = new InetSocketAddress(addr, 50010);
+      this.asURI = asURI;
+    }
+
+    @Override
+    public ReadableByteChannel getInputStreamChannel() {
+      return null;
+    }
+
+    @Override
+    public void setReadTimeout(int timeoutMs) throws IOException {
+    }
+
+    @Override
+    public int getReceiveBufferSize() throws IOException {
+      return 0;
+    }
+
+    @Override
+    public boolean getTcpNoDelay() throws IOException {
+      return false;
+    }
+
+    @Override
+    public void setWriteTimeout(int timeoutMs) throws IOException {
+    }
+
+    @Override
+    public boolean isClosed() {
+      return false;
+    }
+
+    @Override
+    public void close() throws IOException {
+    }
+
+    @Override
+    public String getRemoteAddressString() {
+      return sa.toString();
+    }
+
+    @Override
+    public String getLocalAddressString() {
+      return null;
+    }
+
+    @Override
+    public InputStream getInputStream() throws IOException {
+      return null;
+    }
+
+    @Override
+    public OutputStream getOutputStream() throws IOException {
+      return null;
+    }
+
+    @Override
+    public boolean isLocal() {
+      return false;
+    }
+
+    @Override
+    public DomainSocket getDomainSocket() {
+      return null;
+    }
+
+    @Override
+    public boolean hasSecureChannel() {
+      return false;
+    }
+  }
+}
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.mapreduce.security.TokenCache;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StopWatch;

@@ -712,19 +713,19 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {

  private String[] identifyHosts(int replicationFactor,
      Map<Node,NodeInfo> racksMap) {

    String [] retVal = new String[replicationFactor];

    List <NodeInfo> rackList = new LinkedList<NodeInfo>();

    rackList.addAll(racksMap.values());

    // Sort the racks based on their contribution to this split
    sortInDescendingOrder(rackList);

    boolean done = false;
    int index = 0;

    // Get the host list for all our aggregated items, sort
    // them and return the top entries
    for (NodeInfo ni: rackList) {

@@ -733,27 +734,27 @@ public abstract class FileInputFormat<K, V> implements InputFormat<K, V> {

      List<NodeInfo>hostList = new LinkedList<NodeInfo>();
      hostList.addAll(hostSet);

      // Sort the hosts in this rack based on their contribution
      sortInDescendingOrder(hostList);

      for (NodeInfo host: hostList) {
        // Strip out the port number from the host name
-        retVal[index++] = host.node.getName().split(":")[0];
+        retVal[index++] = NetUtils.getHostFromHostPort(host.node.getName());
        if (index == replicationFactor) {
          done = true;
          break;
        }
      }

      if (done == true) {
        break;
      }
    }
    return retVal;
  }

  private String[] fakeRacks(BlockLocation[] blkLocations, int index)
      throws IOException {
    String[] allHosts = blkLocations[index].getHosts();
    String[] allTopos = new String[allHosts.length];
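NetUtils.getHostFromHostPort() is introduced on this branch; for readers without the branch checked out, a rough stand-alone equivalent built on Guava is sketched below. This is an assumption for illustration, not the branch's actual implementation.

    import com.google.common.net.HostAndPort;

    public class HostFromHostPort {
      static String hostOf(String hostPort) {
        // Guava handles "host", "host:port", and "[v6]:port" uniformly,
        // which split(":")[0] does not.
        return HostAndPort.fromString(hostPort).getHost();
      }

      public static void main(String[] args) {
        System.out.println(hostOf("node1.example.com:50010")); // node1.example.com
        System.out.println(hostOf("[2001:db8::1]:50010"));     // 2001:db8::1
      }
    }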
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.util;

import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
+import org.apache.hadoop.net.NetUtils;

@Private
@Unstable
@@ -56,10 +57,7 @@ public class HostUtil {
  public static String convertTrackerNameToHostName(String trackerName) {
    // Ugly!
    // Convert the trackerName to its host name
-    int indexOfColon = trackerName.indexOf(":");
-    String trackerHostName = (indexOfColon == -1) ?
-      trackerName :
-      trackerName.substring(0, indexOfColon);
+    String trackerHostName = NetUtils.getHostFromHostPort(trackerName);
    return trackerHostName.substring("tracker_".length());
  }

@@ -91,6 +91,7 @@ import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;

import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.HostAndPort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -173,10 +174,12 @@ public class HistoryClientService extends AbstractService {
        .withXFSProtection(JHAdminConfig.MR_HISTORY_XFS_PREFIX)
        .withAppClientProtocol(appClientProtocol)
        .at(NetUtils.getHostPortString(bindAddress)).start(webApp);

-    String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf).split(":")[0];
-    MRWebAppUtil.setJHSWebappURLWithoutScheme(conf,
-        connectHost + ":" + webApp.getListenerAddress().getPort());
+    String connectHost = MRWebAppUtil.getJHSWebappURLWithoutScheme(conf);
+    MRWebAppUtil.setJHSWebappURLWithoutScheme(conf, HostAndPort
+        .fromParts(HostAndPort.fromString(connectHost).getHost(),
+            webApp.getListenerAddress().getPort()).toString());
  }

  @Override
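The HistoryClientService change keeps the whole "host:port" string and rebuilds it with HostAndPort.fromParts(), because naive host + ":" + port concatenation drops the brackets an IPv6 literal needs. A small demonstration (address values are illustrative):

    import com.google.common.net.HostAndPort;

    public class RebuildAddress {
      public static void main(String[] args) {
        String fromConf = "[::1]:19888"; // illustrative JHS web address
        String host = HostAndPort.fromString(fromConf).getHost(); // ::1
        // fromParts() restores the brackets automatically when the host
        // contains colons.
        System.out.println(HostAndPort.fromParts(host, 19890));
        // -> [::1]:19890
      }
    }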
@@ -37,6 +37,8 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.junit.Assert;
import org.junit.Test;

+import com.google.common.net.HostAndPort;
+
/**
 * This class checks that RPCs can use specialized socket factories.
 */
@@ -89,9 +91,9 @@ public class TestMRCJCSocketFactory {
        "org.apache.hadoop.ipc.DummySocketFactory");
    jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
    String rmAddress = jconf.get(YarnConfiguration.RM_ADDRESS);
-    String[] split = rmAddress.split(":");
-    jconf.set(YarnConfiguration.RM_ADDRESS, split[0] + ':'
-        + (Integer.parseInt(split[1]) + 10));
+    HostAndPort hp = HostAndPort.fromString(rmAddress);
+    jconf.set("yarn.resourcemanager.address",
+        hp.getHost() + ':' + (hp.getPort() + 10));
    client = new JobClient(jconf);

    JobStatus[] jobs = client.jobsToComplete();
@@ -33,6 +33,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.util.HostUtil;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Shell;

@@ -342,7 +343,7 @@ public class ReliabilityTest extends Configured implements Tool {
      LOG.info(new Date() + " Stopping a few trackers");

      for (String tracker : trackerNamesList) {
-        String host = convertTrackerNameToHostName(tracker);
+        String host = HostUtil.convertTrackerNameToHostName(tracker);
        LOG.info(new Date() + " Marking tracker on host: " + host);
        fos.write((host + "\n").getBytes());
        if (count++ >= trackerNamesList.size()/2) {

@@ -381,15 +382,6 @@ public class ReliabilityTest extends Configured implements Tool {
      }
    }

-    private String convertTrackerNameToHostName(String trackerName) {
-      // Convert the trackerName to it's host name
-      int indexOfColon = trackerName.indexOf(":");
-      String trackerHostName = (indexOfColon == -1) ?
-        trackerName :
-        trackerName.substring(0, indexOfColon);
-      return trackerHostName.substring("tracker_".length());
-    }
-
  }

  private class KillTaskThread extends Thread {
@@ -151,6 +151,8 @@ import org.junit.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

+import com.google.common.net.HostAndPort;
+
public class TestClientRedirect {

  static {
@@ -325,9 +327,9 @@ public class TestClientRedirect {
      application.setYarnApplicationState(YarnApplicationState.FINISHED);
      application.setFinalApplicationStatus(FinalApplicationStatus.SUCCEEDED);
    }
-    String[] split = AMHOSTADDRESS.split(":");
-    application.setHost(split[0]);
-    application.setRpcPort(Integer.parseInt(split[1]));
+    HostAndPort hp = HostAndPort.fromString(AMHOSTADDRESS);
+    application.setHost(hp.getHost());
+    application.setRpcPort(hp.getPort());
    application.setUser("TestClientRedirect-user");
    application.setName("N/A");
    application.setQueue("N/A");
@@ -834,20 +834,4 @@ public class UtilsForTests {
    file.close();
    return file;
  }
-
-  /**
-   * This formats the long tasktracker name to just the FQDN
-   * @param taskTrackerLong String The long format of the tasktracker string
-   * @return String The FQDN of the tasktracker
-   * @throws Exception
-   */
-  public static String getFQDNofTT (String taskTrackerLong) throws Exception {
-    //Getting the exact FQDN of the tasktracker from the tasktracker string.
-    String[] firstSplit = taskTrackerLong.split("_");
-    String tmpOutput = firstSplit[1];
-    String[] secondSplit = tmpOutput.split(":");
-    String tmpTaskTracker = secondSplit[0];
-    return tmpTaskTracker;
-  }
-
}
@@ -44,6 +44,7 @@ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.MiniYARNCluster;
import org.eclipse.jetty.util.ajax.JSON;
+import com.google.common.net.HostAndPort;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

@@ -193,8 +194,8 @@ public class MiniHadoopClusterManager {
      map.put("namenode_port", dfs.getNameNodePort());
    }
    if (mr != null) {
-      map.put("resourcemanager_port", mr.getConfig().get(
-          YarnConfiguration.RM_ADDRESS).split(":")[1]);
+      map.put("resourcemanager_port", HostAndPort.fromString(
+          mr.getConfig().get(YarnConfiguration.RM_ADDRESS)).getPort());
    }
    FileWriter fw = new FileWriter(new File(writeDetails));
    fw.write(new JSON().toJSON(map));
@@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public;
import org.apache.hadoop.classification.InterfaceStability.Unstable;
import org.apache.hadoop.classification.InterfaceStability.Stable;
import org.apache.hadoop.yarn.util.Records;
+import com.google.common.net.HostAndPort;

/**
 * <p><code>NodeId</code> is the unique identifier for a node.</p>
@@ -116,17 +117,18 @@ public abstract class NodeId implements Comparable<NodeId> {
  @Public
  @Stable
  public static NodeId fromString(String nodeIdStr) {
-    String[] parts = nodeIdStr.split(":");
-    if (parts.length != 2) {
-      throw new IllegalArgumentException("Invalid NodeId [" + nodeIdStr
-          + "]. Expected host:port");
+    HostAndPort hp = HostAndPort.fromString(nodeIdStr);
+    if (!hp.hasPort()) {
+      throw new IllegalArgumentException(
+          "Invalid NodeId [" + nodeIdStr + "]. Expected host:port");
    }
    try {
-      NodeId nodeId =
-          NodeId.newInstance(parts[0].trim(), Integer.parseInt(parts[1]));
+      String hostPortStr = hp.toString();
+      String host = hostPortStr.substring(0, hostPortStr.lastIndexOf(":"));
+      NodeId nodeId = NodeId.newInstance(host, hp.getPort());
      return nodeId;
    } catch (NumberFormatException e) {
-      throw new IllegalArgumentException("Invalid port: " + parts[1], e);
+      throw new IllegalArgumentException("Invalid port: " + hp.getPort(), e);
    }
  }

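For reference, the Guava calls the new NodeId.fromString() depends on behave as follows (node-id strings are illustrative):

    import com.google.common.net.HostAndPort;

    public class NodeIdParsing {
      public static void main(String[] args) {
        HostAndPort hp = HostAndPort.fromString("[2001:db8::1]:45454");
        System.out.println(hp.getHost()); // 2001:db8::1 (brackets stripped)
        System.out.println(hp.hasPort()); // true
        System.out.println(hp.getPort()); // 45454

        // Without a port, hasPort() is false -- the case the new code rejects.
        System.out.println(HostAndPort.fromString("worker-1").hasPort()); // false
      }
    }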
@@ -36,7 +36,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.factories.RecordFactory;
+import com.google.common.net.HostAndPort;

/**
 * This class contains a set of utilities which help converting data structures
@@ -114,11 +114,11 @@ public class ConverterUtils {

  @Private
  @InterfaceStability.Unstable
-  public static NodeId toNodeIdWithDefaultPort(String nodeIdStr) {
-    if (nodeIdStr.indexOf(":") < 0) {
-      return NodeId.fromString(nodeIdStr + ":0");
-    }
-    return NodeId.fromString(nodeIdStr);
+  public static NodeId toNodeIdWithDefaultPort(
+      String nodeIdStr) {
+    HostAndPort hp = HostAndPort.fromString(nodeIdStr);
+    hp = hp.withDefaultPort(0);
+    return toNodeId(hp.toString());
  }

  /*
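withDefaultPort() collapses the old two-branch indexOf(":") logic into one expression: the default is applied only when no port was present. A quick sketch (hostnames are illustrative):

    import com.google.common.net.HostAndPort;

    public class DefaultPort {
      public static void main(String[] args) {
        // No port in the input: the default 0 is filled in.
        System.out.println(HostAndPort.fromString("nm-host").withDefaultPort(0));
        // -> nm-host:0

        // Port already present: left untouched.
        System.out.println(
            HostAndPort.fromString("nm-host:8042").withDefaultPort(0));
        // -> nm-host:8042
      }
    }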
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.webapp.BadRequestException;
import org.apache.hadoop.yarn.webapp.NotFoundException;
import org.apache.http.NameValuePair;
import org.apache.http.client.utils.URLEncodedUtils;
+import com.google.common.net.HostAndPort;

import javax.servlet.http.HttpServletRequest;

@@ -64,15 +65,13 @@ public class WebAppUtils {

  public static void setRMWebAppPort(Configuration conf, int port) {
    String hostname = getRMWebAppURLWithoutScheme(conf);
-    hostname =
-        (hostname.contains(":")) ? hostname.substring(0, hostname.indexOf(":"))
-            : hostname;
-    setRMWebAppHostnameAndPort(conf, hostname, port);
+    HostAndPort hp = HostAndPort.fromString(hostname);
+    setRMWebAppHostnameAndPort(conf, hp.getHost(), port);
  }

  public static void setRMWebAppHostnameAndPort(Configuration conf,
      String hostname, int port) {
-    String resolvedAddress = hostname + ":" + port;
+    String resolvedAddress = HostAndPort.fromParts(hostname, port).toString();
    if (YarnConfiguration.useHttps(conf)) {
      conf.set(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS, resolvedAddress);
    } else {
@@ -82,12 +81,11 @@ public class WebAppUtils {

  public static void setNMWebAppHostNameAndPort(Configuration conf,
      String hostName, int port) {
+    String hostPortString = HostAndPort.fromParts(hostName, port).toString();
    if (YarnConfiguration.useHttps(conf)) {
-      conf.set(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS,
-          hostName + ":" + port);
+      conf.set(YarnConfiguration.NM_WEBAPP_HTTPS_ADDRESS, hostPortString);
    } else {
-      conf.set(YarnConfiguration.NM_WEBAPP_ADDRESS,
-          hostName + ":" + port);
+      conf.set(YarnConfiguration.NM_WEBAPP_ADDRESS, hostPortString);
    }
  }

@@ -325,7 +323,8 @@ public class WebAppUtils {
    String host = conf.getTrimmed(hostProperty);
    if (host != null && !host.isEmpty()) {
      if (webAppURLWithoutScheme.contains(":")) {
-        webAppURLWithoutScheme = host + ":" + webAppURLWithoutScheme.split(":")[1];
+        String[] splits = webAppURLWithoutScheme.split(":");
+        webAppURLWithoutScheme = host + ":" + splits[splits.length - 1];
      }
      else {
        throw new YarnRuntimeException("webAppURLWithoutScheme must include port specification but doesn't: " +
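The splits[splits.length - 1] fix above matters once IPv6 literals can appear in web-app URLs: only the final colon-separated segment is the port. A tiny demonstration with illustrative values:

    public class LastSegmentPort {
      public static void main(String[] args) {
        String webAppURLWithoutScheme = "[2001:db8::1]:8088";
        String[] splits = webAppURLWithoutScheme.split(":");
        System.out.println(splits[splits.length - 1]); // 8088
        // The old code took splits[1], which is "db8" here.
      }
    }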
@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.yarn.conf;
 
+import com.google.common.net.HostAndPort;
+import org.apache.hadoop.net.NetUtils;
 import org.junit.Assert;
 
 import org.apache.hadoop.yarn.webapp.util.WebAppUtils;

@@ -74,7 +76,7 @@ public class TestYarnConfiguration {
     conf.set(YarnConfiguration.RM_ADDRESS, "rmtesting:9999");
     String rmWebUrl = WebAppUtils.getRMWebAppURLWithScheme(conf);
     String[] parts = rmWebUrl.split(":");
-    Assert.assertEquals("RM Web URL Port is incrrect", 24543,
+    Assert.assertEquals("RM Web URL Port is incorrect", 24543,
         Integer.parseInt(parts[parts.length - 1]));
     Assert.assertNotSame(
         "RM Web Url not resolved correctly. Should not be rmtesting",

@@ -112,10 +114,9 @@ public class TestYarnConfiguration {
         YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
-    assertEquals(
-        new InetSocketAddress(
-            YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
-            YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
+    assertEquals(new InetSocketAddress(
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
         resourceTrackerAddress);
 
     //with address

@@ -125,10 +126,8 @@ public class TestYarnConfiguration {
         YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
-    assertEquals(
-        new InetSocketAddress(
-            "10.0.0.1",
-            YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
+    assertEquals(new InetSocketAddress("10.0.0.1",
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
         resourceTrackerAddress);
 
     //address and socket

@@ -139,9 +138,23 @@ public class TestYarnConfiguration {
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
     assertEquals(
         new InetSocketAddress(
             "10.0.0.2",
             5001),
+        resourceTrackerAddress);
+
+    // IPv6 address and socket
+    conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        "[2401:db00:20:a01e:face:0:5:0]:5001");
+    resourceTrackerAddress = conf.getSocketAddr(
+        YarnConfiguration.RM_BIND_HOST,
+        YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
+    assertEquals(
+        new InetSocketAddress(
+            "2401:db00:20:a01e:face:0:5:0",
+            5001),
         resourceTrackerAddress);
 
     //bind host only
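Note: the new assertion feeds a bracketed IPv6 literal through getSocketAddr and expects the brackets stripped in the resulting InetSocketAddress. A standalone sketch of the same round trip using Guava directly (Ipv6SocketAddrDemo is an illustrative name, not code from the patch):

    import com.google.common.net.HostAndPort;
    import java.net.InetSocketAddress;

    public class Ipv6SocketAddrDemo {
      public static void main(String[] args) {
        HostAndPort hp = HostAndPort.fromString("[2401:db00:20:a01e:face:0:5:0]:5001");
        System.out.println(hp.getHost()); // 2401:db00:20:a01e:face:0:5:0 (no brackets)
        System.out.println(hp.getPort()); // 5001
        // The address the assertion above expects to see.
        System.out.println(new InetSocketAddress(hp.getHost(), hp.getPort()));
      }
    }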
@@ -152,10 +165,8 @@ public class TestYarnConfiguration {
         YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
-    assertEquals(
-        new InetSocketAddress(
-            "10.0.0.3",
-            YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
+    assertEquals(new InetSocketAddress("10.0.0.3",
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
         resourceTrackerAddress);
 
     //bind host and address no port

@@ -166,10 +177,8 @@ public class TestYarnConfiguration {
         YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
-    assertEquals(
-        new InetSocketAddress(
-            "0.0.0.0",
-            YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
+    assertEquals(new InetSocketAddress("0.0.0.0",
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT),
         resourceTrackerAddress);
 
     //bind host and address with port

@@ -180,10 +189,7 @@ public class TestYarnConfiguration {
         YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT);
-    assertEquals(
-        new InetSocketAddress(
-            "0.0.0.0",
-            5003),
+    assertEquals(new InetSocketAddress("0.0.0.0", 5003),
         resourceTrackerAddress);
 
   }

@@ -197,9 +203,8 @@ public class TestYarnConfiguration {
     //no override, old behavior. Won't work on a host named "yo.yo.yo"
     conf = new YarnConfiguration();
     conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
-    serverAddress = new InetSocketAddress(
-        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
-        Integer.parseInt(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
+    serverAddress = newInetSocketAddressFromHostPort(
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS);
 
     resourceTrackerConnectAddress = conf.updateConnectAddr(
         YarnConfiguration.RM_BIND_HOST,

@@ -207,15 +212,15 @@ public class TestYarnConfiguration {
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
         serverAddress);
 
-    assertFalse(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
+    assertFalse(NetUtils.getSocketAddressString(resourceTrackerConnectAddress)
+        .startsWith("yo.yo.yo"));
 
     //cause override with address
     conf = new YarnConfiguration();
     conf.set(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS, "yo.yo.yo");
     conf.set(YarnConfiguration.RM_BIND_HOST, "0.0.0.0");
-    serverAddress = new InetSocketAddress(
-        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[0],
-        Integer.parseInt(YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS.split(":")[1]));
+    serverAddress = newInetSocketAddressFromHostPort(
+        YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS);
 
     resourceTrackerConnectAddress = conf.updateConnectAddr(
         YarnConfiguration.RM_BIND_HOST,

@@ -223,7 +228,8 @@ public class TestYarnConfiguration {
         YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_ADDRESS,
         serverAddress);
 
-    assertTrue(resourceTrackerConnectAddress.toString().startsWith("yo.yo.yo"));
+    assertTrue(NetUtils.getSocketAddressString(resourceTrackerConnectAddress)
+        .startsWith("yo.yo.yo"));
 
     //tests updateConnectAddr won't add suffix to NM service address configurations
     conf = new YarnConfiguration();

@@ -232,9 +238,8 @@ public class TestYarnConfiguration {
     conf.setBoolean(YarnConfiguration.RM_HA_ENABLED, true);
     conf.set(YarnConfiguration.RM_HA_ID, "rm1");
 
-    serverAddress = new InetSocketAddress(
-        YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[0],
-        Integer.parseInt(YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS.split(":")[1]));
+    serverAddress = newInetSocketAddressFromHostPort(
+        YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS);
 
     InetSocketAddress localizerAddress = conf.updateConnectAddr(
         YarnConfiguration.NM_BIND_HOST,

@@ -242,8 +247,15 @@ public class TestYarnConfiguration {
         YarnConfiguration.DEFAULT_NM_LOCALIZER_ADDRESS,
         serverAddress);
 
-    assertTrue(localizerAddress.toString().startsWith("yo.yo.yo"));
+    assertTrue(NetUtils.getSocketAddressString(localizerAddress)
+        .startsWith("yo.yo.yo"));
     assertNull(conf.get(
         HAUtil.addSuffix(YarnConfiguration.NM_LOCALIZER_ADDRESS, "rm1")));
   }
 
+  private InetSocketAddress newInetSocketAddressFromHostPort(
+      String hostPort) {
+    HostAndPort hp = HostAndPort.fromString(hostPort);
+    return new InetSocketAddress(hp.getHost(), hp.getPort());
+  }
 }
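Note: the private helper added at the bottom of the test class centralizes host:port parsing. A usage sketch of the same pattern outside the test (HostPortHelperDemo is a hypothetical wrapper, not part of the patch):

    import com.google.common.net.HostAndPort;
    import java.net.InetSocketAddress;

    public class HostPortHelperDemo {
      // One parse replaces the two split(":") calls and also accepts
      // bracketed IPv6 literals such as "[::1]:8031".
      static InetSocketAddress newInetSocketAddressFromHostPort(String hostPort) {
        HostAndPort hp = HostAndPort.fromString(hostPort);
        return new InetSocketAddress(hp.getHost(), hp.getPort());
      }

      public static void main(String[] args) {
        System.out.println(newInetSocketAddressFromHostPort("0.0.0.0:8031"));
        System.out.println(newInetSocketAddressFromHostPort("[::1]:8031"));
      }
    }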
@@ -31,7 +31,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.junit.Test;
 
 public class TestConverterUtils {
 
   @Test
   public void testConvertUrlWithNoPort() throws URISyntaxException {
     Path expectedPath = new Path("hdfs://foo.com");

@@ -92,14 +92,24 @@ public class TestConverterUtils {
   @Test
   public void testNodeIdWithDefaultPort() throws URISyntaxException {
     NodeId nid;
 
     nid = ConverterUtils.toNodeIdWithDefaultPort("node:10");
     assertThat(nid.getPort()).isEqualTo(10);
     assertThat(nid.getHost()).isEqualTo("node");
 
     nid = ConverterUtils.toNodeIdWithDefaultPort("node");
     assertThat(nid.getPort()).isEqualTo(0);
     assertThat(nid.getHost()).isEqualTo("node");
 
+    nid = ConverterUtils
+        .toNodeIdWithDefaultPort("[2401:db00:20:a01e:face:0:5:0]:10");
+    assertEquals(nid.getPort(), 10);
+    assertEquals(nid.getHost(), "[2401:db00:20:a01e:face:0:5:0]");
+
+    nid = ConverterUtils
+        .toNodeIdWithDefaultPort("[2401:db00:20:a01e:face:0:5:0]");
+    assertEquals(nid.getPort(), 0);
+    assertEquals(nid.getHost(), "[2401:db00:20:a01e:face:0:5:0]");
   }
 
   @Test(expected = IllegalArgumentException.class)
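Note: the expected host in the IPv6 cases keeps its square brackets, unlike Guava's HostAndPort.getHost(), which strips them. A hedged sketch of the parsing these assertions exercise, using hasPort-style defaulting to stand in for the "default port" behavior (NodeIdParseDemo is illustrative, not the actual ConverterUtils code):

    import com.google.common.net.HostAndPort;

    public class NodeIdParseDemo {
      public static void main(String[] args) {
        String[] inputs = {"node:10", "node",
            "[2401:db00:20:a01e:face:0:5:0]:10", "[2401:db00:20:a01e:face:0:5:0]"};
        for (String s : inputs) {
          HostAndPort hp = HostAndPort.fromString(s);
          // getPortOrDefault(0) mirrors the port-0 default the test expects;
          // getHost() prints the host without brackets, unlike the NodeId.
          System.out.println(hp.getHost() + " -> port " + hp.getPortOrDefault(0));
        }
      }
    }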
@@ -27,6 +27,7 @@ import java.net.InetSocketAddress;
 import java.net.Socket;
 
 import org.apache.hadoop.net.ServerSocketUtil;
+import com.google.common.net.HostAndPort;
 import org.junit.Assert;
 
 import org.apache.hadoop.yarn.lib.ZKClient;

@@ -86,8 +87,9 @@ public class TestZKClient {
     long start = System.currentTimeMillis();
     while (true) {
       try {
-        String host = hp.split(":")[0];
-        int port = Integer.parseInt(hp.split(":")[1]);
+        HostAndPort hap = HostAndPort.fromString(hp);
+        String host = hap.getHost();
+        int port = hap.getPort();
         send4LetterWord(host, port, "stat");
       } catch (IOException e) {
         return true;

@@ -110,8 +112,9 @@ public class TestZKClient {
     long start = System.currentTimeMillis();
     while (true) {
       try {
-        String host = hp.split(":")[0];
-        int port = Integer.parseInt(hp.split(":")[1]);
+        HostAndPort hap = HostAndPort.fromString(hp);
+        String host = hap.getHost();
+        int port = hap.getPort();
         // if there are multiple hostports, just take the first one
         String result = send4LetterWord(host, port, "stat");
         if (result.startsWith("Zookeeper version:")) {

@@ -151,14 +154,15 @@ public class TestZKClient {
     }
     File dataDir = createTmpDir(BASETEST);
     zks = new ZooKeeperServer(dataDir, dataDir, 3000);
-    final int PORT = Integer.parseInt(hostPort.split(":")[1]);
+    HostAndPort hp = HostAndPort.fromString(hostPort);
+    final int port = hp.getPort();
     if (factory == null) {
       factory = new NIOServerCnxnFactory();
-      factory.configure(new InetSocketAddress(PORT), maxCnxns);
+      factory.configure(new InetSocketAddress(port), maxCnxns);
     }
     factory.startup(zks);
     Assert.assertTrue("waiting for server up",
-        waitForServerUp("127.0.0.1:" + PORT,
+        waitForServerUp("127.0.0.1:" + port,
             CONNECTION_TIMEOUT));
 
   }

@@ -172,10 +176,11 @@ public class TestZKClient {
       zkDb.close();
     } catch (IOException ie) {
     }
-    final int PORT = Integer.parseInt(hostPort.split(":")[1]);
+    HostAndPort hp = HostAndPort.fromString(hostPort);
+    final int port = hp.getPort();
 
     Assert.assertTrue("waiting for server down",
-        waitForServerDown("127.0.0.1:" + PORT,
+        waitForServerDown("127.0.0.1:" + port,
             CONNECTION_TIMEOUT));
   }
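Note: split(":")[1] cannot recover the port from a bracketed IPv6 connect string: "[::1]:2181".split(":") yields ["[", "", "1]", "2181"], so index 1 is an empty string and Integer.parseInt throws. A minimal demonstration (ZkHostPortDemo is an illustrative name):

    import com.google.common.net.HostAndPort;

    public class ZkHostPortDemo {
      public static void main(String[] args) {
        String hp = "[::1]:2181";
        // Old approach: Integer.parseInt(hp.split(":")[1]) -> NumberFormatException.
        HostAndPort hap = HostAndPort.fromString(hp);
        System.out.println(hap.getHost()); // ::1
        System.out.println(hap.getPort()); // 2181
      }
    }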
@@ -185,6 +185,7 @@ import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock;
+import com.google.common.net.HostAndPort;
 
 import static org.apache.hadoop.service.Service.STATE.STARTED;
 

@@ -645,7 +646,7 @@ public class ContainerManagerImpl extends CompositeService implements
       //hostname found when querying for our hostname with the specified
       //address, combine the specified address with the actual port listened
       //on by the server
-      hostOverride = nmAddress.split(":")[0];
+      hostOverride = HostAndPort.fromString(nmAddress).getHost();
     }
 
     // setup node ID
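Note: split(":")[0] truncates an IPv6 NM address at its first colon, while HostAndPort.fromString().getHost() returns the whole host. A one-method comparison (HostOverrideDemo is hypothetical):

    import com.google.common.net.HostAndPort;

    public class HostOverrideDemo {
      public static void main(String[] args) {
        String nmAddress = "[2401:db00:20:a01e:face:0:5:0]:8041";
        System.out.println(nmAddress.split(":")[0]);                     // "[2401" -- truncated
        System.out.println(HostAndPort.fromString(nmAddress).getHost()); // full IPv6 host
      }
    }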
@@ -137,6 +137,7 @@ import org.apache.hadoop.yarn.webapp.util.WebAppUtils;
 import org.apache.zookeeper.server.auth.DigestAuthenticationProvider;
 import org.eclipse.jetty.webapp.WebAppContext;
 
+import com.google.common.net.HostAndPort;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.PrintStream;

@@ -1399,8 +1400,8 @@ public class ResourceManager extends CompositeService
       builder.withAttribute(WebAppProxy.PROXY_CA,
           rmContext.getProxyCAManager().getProxyCA());
       builder.withAttribute(WebAppProxy.FETCHER_ATTRIBUTE, fetcher);
-      String[] proxyParts = proxyHostAndPort.split(":");
-      builder.withAttribute(WebAppProxy.PROXY_HOST_ATTRIBUTE, proxyParts[0]);
+      builder.withAttribute(WebAppProxy.PROXY_HOST_ATTRIBUTE,
+          HostAndPort.fromString(proxyHostAndPort).getHost());
     }
 
     WebAppContext uiWebAppContext = null;
@@ -30,6 +30,7 @@ import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.ConcurrentHashMap;
 
+import com.google.common.net.HostAndPort;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.Container;

@@ -105,8 +106,10 @@ public class MockNM {
     this.capability = capability;
     this.resourceTracker = resourceTracker;
     this.version = version;
-    String[] splits = nodeIdStr.split(":");
-    nodeId = BuilderUtils.newNodeId(splits[0], Integer.parseInt(splits[1]));
+    HostAndPort hostAndPort = HostAndPort.fromString(nodeIdStr);
+    String hostPortStr = hostAndPort.toString();
+    String host = hostPortStr.substring(0, hostPortStr.lastIndexOf(":"));
+    nodeId = BuilderUtils.newNodeId(host, hostAndPort.getPort());
   }
 
   public MockNM(String nodeIdStr, Resource capability,
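Note: cutting the normalized toString() at its last colon, instead of calling getHost(), keeps the square brackets on an IPv6 host, matching the bracketed form the NodeId tests above expect. A sketch of the two behaviors (BracketedHostDemo is an illustrative name):

    import com.google.common.net.HostAndPort;

    public class BracketedHostDemo {
      public static void main(String[] args) {
        HostAndPort hp = HostAndPort.fromString("[2401:db00:20:a01e:face:0:5:0]:5001");
        String s = hp.toString(); // normalized; IPv6 host is re-bracketed
        System.out.println(s.substring(0, s.lastIndexOf(':'))); // [2401:db00:20:a01e:face:0:5:0]
        System.out.println(hp.getHost());                       // 2401:db00:20:a01e:face:0:5:0
      }
    }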
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.webproxy;
 import java.io.IOException;
 import java.net.URI;
 
+import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.HttpServer2;
 import org.apache.hadoop.security.authorize.AccessControlList;

@@ -57,7 +58,8 @@ public class WebAppProxy extends AbstractService {
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-    String auth = conf.get(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
+    String auth = conf.get(
+        CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION);
     if (auth == null || "simple".equals(auth)) {
       isSecurityEnabled = false;
     } else if ("kerberos".equals(auth)) {

@@ -68,8 +70,7 @@ public class WebAppProxy extends AbstractService {
           " of " + auth);
     }
     String proxy = WebAppUtils.getProxyHostAndPort(conf);
-    String[] proxyParts = proxy.split(":");
-    proxyHost = proxyParts[0];
+    proxyHost = HostAndPort.fromString(proxy).getHost();
 
     fetcher = new AppReportFetcher(conf);
     bindAddress = conf.get(YarnConfiguration.PROXY_ADDRESS);
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.server.webproxy.amfilter;
 
 import org.apache.hadoop.thirdparty.com.google.common.annotations.VisibleForTesting;
+import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.http.FilterInitializer;

@@ -38,14 +39,15 @@ public class AmFilterInitializer extends FilterInitializer {
   private static final String FILTER_NAME = "AM_PROXY_FILTER";
   private static final String FILTER_CLASS = AmIpFilter.class.getCanonicalName();
   public static final String RM_HA_URLS = "RM_HA_URLS";
 
   @Override
   public void initFilter(FilterContainer container, Configuration conf) {
     Map<String, String> params = new HashMap<>();
     List<String> proxies = WebAppUtils.getProxyHostsAndPortsForAmFilter(conf);
     StringBuilder sb = new StringBuilder();
     for (String proxy : proxies) {
-      sb.append(proxy.split(":")[0]).append(AmIpFilter.PROXY_HOSTS_DELIMITER);
+      sb.append(HostAndPort.fromString(proxy).getHost())
+          .append(AmIpFilter.PROXY_HOSTS_DELIMITER);
     }
     sb.setLength(sb.length() - 1);
     params.put(AmIpFilter.PROXY_HOSTS, sb.toString());
@@ -50,6 +50,7 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.http.HttpServer2;

@@ -589,8 +590,7 @@ public class TestWebAppProxyServlet {
     proxyServer.setAttribute(IS_SECURITY_ENABLED_ATTRIBUTE, Boolean.TRUE);
 
     String proxy = WebAppUtils.getProxyHostAndPort(conf);
-    String[] proxyParts = proxy.split(":");
-    String proxyHost = proxyParts[0];
+    String proxyHost = HostAndPort.fromString(proxy).getHost();
 
     proxyServer.setAttribute(PROXY_HOST_ATTRIBUTE, proxyHost);
     proxyServer.start();