diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index ff52bd353de..2d6bcfebd4e 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -423,8 +423,8 @@ checkJavacWarnings () {
if [[ $? != 0 ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
- -1 javac. The patch appears to cause tar ant target to fail."
- return 1
+ -1 javac. The patch appears to cause the build to fail."
+ return 2
fi
### Compare trunk and patch javac warning numbers
if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then
@@ -528,6 +528,24 @@ $JIRA_COMMENT_FOOTER"
return 0
}
+###############################################################################
+### Install the new jars so tests and findbugs can find all of the updated jars
+buildAndInstall () {
+ echo ""
+ echo ""
+ echo "======================================================================"
+ echo "======================================================================"
+ echo " Installing all of the jars"
+ echo "======================================================================"
+ echo "======================================================================"
+ echo ""
+ echo ""
+ echo "$MVN install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess"
+ $MVN install -Dmaven.javadoc.skip=true -DskipTests -D${PROJECT_NAME}PatchProcess
+ return $?
+}
+
+
###############################################################################
### Check there are no changes in the number of Findbugs warnings
checkFindbugsWarnings () {
@@ -882,15 +900,22 @@ if [[ $? != 0 ]] ; then
submitJiraComment 1
cleanupAndExit 1
fi
-checkJavadocWarnings
-(( RESULT = RESULT + $? ))
checkJavacWarnings
+JAVAC_RET=$?
+#2 is returned if the code could not compile
+if [[ $JAVAC_RET == 2 ]] ; then
+ submitJiraComment 1
+ cleanupAndExit 1
+fi
+(( RESULT = RESULT + $JAVAC_RET ))
+checkJavadocWarnings
(( RESULT = RESULT + $? ))
checkEclipseGeneration
(( RESULT = RESULT + $? ))
### Checkstyle not implemented yet
#checkStyle
#(( RESULT = RESULT + $? ))
+buildAndInstall
checkFindbugsWarnings
(( RESULT = RESULT + $? ))
checkReleaseAuditWarnings
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
index 48b6cbec6e3..4227d084385 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
@@ -26,7 +26,6 @@
import javax.security.auth.login.LoginContext;
import javax.security.auth.login.LoginException;
import java.io.IOException;
-import java.lang.reflect.Field;
import java.net.HttpURLConnection;
import java.net.URL;
import java.security.AccessControlContext;
@@ -196,11 +195,10 @@ public Void run() throws Exception {
try {
GSSManager gssManager = GSSManager.getInstance();
String servicePrincipal = "HTTP/" + KerberosAuthenticator.this.url.getHost();
-
+ Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
GSSName serviceName = gssManager.createName(servicePrincipal,
- GSSName.NT_HOSTBASED_SERVICE);
- Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal,
- gssManager);
+ oid);
+ oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
gssContext = gssManager.createContext(serviceName, oid, null,
GSSContext.DEFAULT_LIFETIME);
gssContext.requestCredDeleg(true);
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
index b37f39a50c6..28a4d3de90a 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/AuthenticationFilter.java
@@ -327,6 +327,8 @@ protected AuthenticationToken getToken(HttpServletRequest request) throws IOExce
@Override
public void doFilter(ServletRequest request, ServletResponse response, FilterChain filterChain)
throws IOException, ServletException {
+ boolean unauthorizedResponse = true;
+ String unauthorizedMsg = "";
HttpServletRequest httpRequest = (HttpServletRequest) request;
HttpServletResponse httpResponse = (HttpServletResponse) response;
try {
@@ -350,6 +352,7 @@ public void doFilter(ServletRequest request, ServletResponse response, FilterCha
newToken = true;
}
if (token != null) {
+ unauthorizedResponse = false;
if (LOG.isDebugEnabled()) {
LOG.debug("Request [{}] user [{}] authenticated", getRequestURL(httpRequest), token.getUserName());
}
@@ -378,17 +381,17 @@ public Principal getUserPrincipal() {
}
filterChain.doFilter(httpRequest, httpResponse);
}
- else {
- throw new AuthenticationException("Missing AuthenticationToken");
- }
} catch (AuthenticationException ex) {
+ unauthorizedMsg = ex.toString();
+ LOG.warn("Authentication exception: " + ex.getMessage(), ex);
+ }
+ if (unauthorizedResponse) {
if (!httpResponse.isCommitted()) {
Cookie cookie = createCookie("");
cookie.setMaxAge(0);
httpResponse.addCookie(cookie);
- httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, ex.getMessage());
+ httpResponse.sendError(HttpServletResponse.SC_UNAUTHORIZED, unauthorizedMsg);
}
- LOG.warn("Authentication exception: " + ex.getMessage(), ex);
}
}
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
index df8319c6643..5688e600f77 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
@@ -22,7 +22,6 @@
import java.lang.reflect.Method;
import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
import org.ietf.jgss.Oid;
public class KerberosUtil {
@@ -34,8 +33,7 @@ public static String getKrb5LoginModuleName() {
: "com.sun.security.auth.module.Krb5LoginModule";
}
- public static Oid getOidClassInstance(String servicePrincipal,
- GSSManager gssManager)
+ public static Oid getOidInstance(String oidName)
throws ClassNotFoundException, GSSException, NoSuchFieldException,
IllegalAccessException {
Class<?> oidClass;
@@ -44,7 +42,7 @@ public static Oid getOidClassInstance(String servicePrincipal,
} else {
oidClass = Class.forName("sun.security.jgss.GSSUtil");
}
- Field oidField = oidClass.getDeclaredField("GSS_KRB5_MECH_OID");
+ Field oidField = oidClass.getDeclaredField(oidName);
return (Oid)oidField.get(oidClass);
}
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
index e6e7c9cca00..692ceab92da 100644
--- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
@@ -145,10 +145,10 @@ public String call() throws Exception {
GSSContext gssContext = null;
try {
String servicePrincipal = KerberosTestUtils.getServerPrincipal();
+ Oid oid = KerberosUtil.getOidInstance("NT_GSS_KRB5_PRINCIPAL");
GSSName serviceName = gssManager.createName(servicePrincipal,
- GSSName.NT_HOSTBASED_SERVICE);
- Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal,
- gssManager);
+ oid);
+ oid = KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID");
gssContext = gssManager.createContext(serviceName, oid, null,
GSSContext.DEFAULT_LIFETIME);
gssContext.requestCredDeleg(true);
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index cd7539fb566..018bef311a0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -63,8 +63,6 @@ Trunk (unreleased changes)
HADOOP-8290. Remove remaining references to hadoop.native.lib (harsh)
- HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
-
HADOOP-8308. Support cross-project Jenkins builds. (tomwhite)
BUG FIXES
@@ -129,6 +127,15 @@ Trunk (unreleased changes)
HADOOP-8339. jenkins complaining about 16 javadoc warnings
(Tom White and Robert Evans via tgraves)
+ HADOOP-8354. test-patch findbugs may fail if a dependent module is changed
+ (Tom White and Robert Evans)
+
+ HADOOP-8375. test-patch should stop immediately once it has found
+ compilation errors (bobby)
+
+ HADOOP-8395. Text shell command unnecessarily demands that a
+ SequenceFile's key class be WritableComparable (harsh)
+
OPTIMIZATIONS
HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -139,6 +146,9 @@ Release 2.0.0 - UNRELEASED
HADOOP-7920. Remove Avro Rpc. (suresh)
+ HADOOP-8388. Remove unused BlockLocation serialization.
+ (Colin Patrick McCabe via eli)
+
NEW FEATURES
HADOOP-7773. Add support for protocol buffer based RPC engine.
@@ -163,6 +173,9 @@ Release 2.0.0 - UNRELEASED
HADOOP-8210. Common side of HDFS-3148: The client should be able
to use multiple local interfaces for data transfer. (eli)
+ HADOOP-8343. Allow configuration of authorization for JmxJsonServlet and
+ MetricsServlet (tucu)
+
IMPROVEMENTS
HADOOP-7524. Change RPC to allow multiple protocols including multuple
@@ -284,6 +297,34 @@ Release 2.0.0 - UNRELEASED
HADOOP-8214. make hadoop script recognize a full set of deprecated commands (rvs via tucu)
+ HADOOP-8347. Hadoop Common logs misspell 'successful'.
+ (Philip Zeyliger via eli)
+
+ HADOOP-8350. Improve NetUtils.getInputStream to return a stream which has
+ a tunable timeout. (todd)
+
+ HADOOP-8356. FileSystem service loading mechanism should print the FileSystem
+ impl it is failing to load (tucu)
+
+ HADOOP-8340. SNAPSHOT build versions should compare as less than their eventual
+ final release. (todd)
+
+ HADOOP-8361. Avoid out-of-memory problems when deserializing strings.
+ (Colin Patrick McCabe via eli)
+
+ HADOOP-8353. hadoop-daemon.sh and yarn-daemon.sh can be misleading on stop.
+ (Roman Shaposhnik via atm)
+
+ HADOOP-8224. Don't hardcode hdfs.audit.logger in the scripts.
+ (Tomohiko Kinebuchi via eli)
+
+ HADOOP-8113. Correction to BUILDING.txt: HDFS needs ProtocolBuffer, too
+ (not just MapReduce). Contributed by Eugene Koontz.
+
+ HADOOP-8285 Use ProtoBuf for RpcPayLoadHeader (sanjay radia)
+
+ HADOOP-8366 Use ProtoBuf for RpcResponseHeader (sanjay radia)
+
OPTIMIZATIONS
BUG FIXES
@@ -314,8 +355,6 @@ Release 2.0.0 - UNRELEASED
HADOOP-8104. Inconsistent Jackson versions (tucu)
- HADOOP-7940. The Text.clear() method does not clear the bytes as intended. (Csaba Miklos via harsh)
-
HADOOP-8119. Fix javac warnings in TestAuthenticationFilter in hadoop-auth.
(szetszwo)
@@ -406,6 +445,22 @@ Release 2.0.0 - UNRELEASED
HADOOP-8342. HDFS command fails with exception following merge of
HADOOP-8325 (tucu)
+ HADOOP-8346. Makes oid changes to make SPNEGO work. Was broken due
+ to fixes introduced by the IBM JDK compatibility patch. (ddas)
+
+ HADOOP-8355. SPNEGO filter throws/logs exception when authentication fails (tucu)
+
+ HADOOP-8349. ViewFS doesn't work when the root of a file system is mounted. (atm)
+
+ HADOOP-8328. Duplicate FileSystem Statistics object for 'file' scheme.
+ (tomwhite)
+
+ HADOOP-8359. Fix javadoc warnings in Configuration. (Anupam Seth via
+ szetszwo)
+
+ HADOOP-8372. NetUtils.normalizeHostName() incorrectly handles hostname
+ starting with a numeric character. (Junping Du via suresh)
+
BREAKDOWN OF HADOOP-7454 SUBTASKS
HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
@@ -464,6 +519,11 @@ Release 2.0.0 - UNRELEASED
HADOOP-8172. Configuration no longer sets all keys in a deprecated key
list. (Anupam Seth via bobby)
+ HADOOP-7868. Hadoop native fails to compile when default linker
+ option is -Wl,--as-needed. (Trevor Robinson via eli)
+
+ HADOOP-8316. Audit logging should be disabled by default. (eli)
+
Release 0.23.3 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -519,6 +579,13 @@ Release 0.23.3 - UNRELEASED
HADOOP-8335. Improve Configuration's address handling (Daryn Sharp via
bobby)
+ HADOOP-8327. distcpv2 and distcpv1 jars should not coexist (Dave Thompson
+ via bobby)
+
+ HADOOP-8341. Fix or filter findbugs issues in hadoop-tools (bobby)
+
+ HADOOP-8373. Port RPC.getServerAddress to 0.23 (Daryn Sharp via bobby)
+
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
index 2688742756c..9d192501ed2 100755
--- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
+++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop-daemon.sh
@@ -109,8 +109,10 @@ fi
export HADOOP_LOGFILE=hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.log
export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-"INFO,RFA"}
export HADOOP_SECURITY_LOGGER=${HADOOP_SECURITY_LOGGER:-"INFO,RFAS"}
+export HDFS_AUDIT_LOGGER=${HDFS_AUDIT_LOGGER:-"INFO,NullAppender"}
log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-$HOSTNAME.out
pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
+HADOOP_STOP_TIMEOUT=${HADOOP_STOP_TIMEOUT:-5}
# Set default scheduling priority
if [ "$HADOOP_NICENESS" = "" ]; then
@@ -162,9 +164,15 @@ case $startStop in
(stop)
if [ -f $pid ]; then
- if kill -0 `cat $pid` > /dev/null 2>&1; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo stopping $command
- kill `cat $pid`
+ kill $TARGET_PID
+ sleep $HADOOP_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "$command did not stop gracefully after $HADOOP_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
else
echo no $command to stop
fi
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 3470b3ef1b7..63e27cf72f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
#Security appender
#
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
#
# hdfs audit logging
#
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
index 5ca8537135f..da39fa57b74 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/ConfServlet.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.conf;
import java.io.IOException;
-import java.io.OutputStreamWriter;
import java.io.Writer;
import javax.servlet.ServletException;
@@ -57,9 +56,8 @@ private Configuration getConfFromContext() {
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
- // Do the authorization
- if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
- response)) {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+ request, response)) {
return;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
index 044e5cb08a3..d1ef7a49fec 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
@@ -278,7 +278,7 @@ private final String getWarningMessage(String key) {
* @param key
* @param newKeys
* @param customMessage
- * @deprecated use {@link addDeprecation(String key, String newKey,
+ * @deprecated use {@link #addDeprecation(String key, String newKey,
String customMessage)} instead
*/
@Deprecated
@@ -328,7 +328,7 @@ public synchronized static void addDeprecation(String key, String newKey,
*
* @param key Key that is to be deprecated
* @param newKeys list of keys that take up the values of deprecated key
- * @deprecated use {@link addDeprecation(String key, String newKey)} instead
+ * @deprecated use {@link #addDeprecation(String key, String newKey)} instead
*/
@Deprecated
public synchronized static void addDeprecation(String key, String[] newKeys) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 86974734b59..cbcce217b61 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -346,7 +346,7 @@ public void checkPath(Path path) {
path);
} else {
throw new InvalidPathException(
- "Path without scheme with non-null autorhrity:" + path);
+ "Path without scheme with non-null authority:" + path);
}
}
String thisScheme = this.getUri().getScheme();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
index 7a107cf0564..46989f2204c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/BlockLocation.java
@@ -35,16 +35,7 @@
*/
@InterfaceAudience.Public
@InterfaceStability.Stable
-public class BlockLocation implements Writable {
-
- static { // register a ctor
- WritableFactories.setFactory
- (BlockLocation.class,
- new WritableFactory() {
- public Writable newInstance() { return new BlockLocation(); }
- });
- }
-
+public class BlockLocation {
private String[] hosts; //hostnames of datanodes
private String[] names; //hostname:portNumber of datanodes
private String[] topologyPaths; // full path name in network topology
@@ -219,62 +210,6 @@ public void setTopologyPaths(String[] topologyPaths) throws IOException {
}
}
- /**
- * Implement write of Writable
- */
- public void write(DataOutput out) throws IOException {
- out.writeLong(offset);
- out.writeLong(length);
- out.writeBoolean(corrupt);
- out.writeInt(names.length);
- for (int i=0; i < names.length; i++) {
- Text name = new Text(names[i]);
- name.write(out);
- }
- out.writeInt(hosts.length);
- for (int i=0; i < hosts.length; i++) {
- Text host = new Text(hosts[i]);
- host.write(out);
- }
- out.writeInt(topologyPaths.length);
- for (int i=0; i < topologyPaths.length; i++) {
- Text host = new Text(topologyPaths[i]);
- host.write(out);
- }
- }
-
- /**
- * Implement readFields of Writable
- */
- public void readFields(DataInput in) throws IOException {
- this.offset = in.readLong();
- this.length = in.readLong();
- this.corrupt = in.readBoolean();
- int numNames = in.readInt();
- this.names = new String[numNames];
- for (int i = 0; i < numNames; i++) {
- Text name = new Text();
- name.readFields(in);
- names[i] = name.toString();
- }
-
- int numHosts = in.readInt();
- this.hosts = new String[numHosts];
- for (int i = 0; i < numHosts; i++) {
- Text host = new Text();
- host.readFields(in);
- hosts[i] = host.toString();
- }
-
- int numTops = in.readInt();
- topologyPaths = new String[numTops];
- for (int i = 0; i < numTops; i++) {
- Text path = new Text();
- path.readFields(in);
- topologyPaths[i] = path.toString();
- }
- }
-
public String toString() {
StringBuilder result = new StringBuilder();
result.append(offset);
@@ -289,4 +224,4 @@ public String toString() {
}
return result.toString();
}
-}
+}
\ No newline at end of file
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
index 9bc5c374593..67f3bc594c9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
@@ -228,6 +228,9 @@ public class CommonConfigurationKeysPublic {
public static final String HADOOP_SECURITY_AUTHORIZATION =
"hadoop.security.authorization";
/** See core-default.xml */
+ public static final String HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN =
+ "hadoop.security.instrumentation.requires.admin";
+ /** See core-default.xml */
public static final String HADOOP_SECURITY_SERVICE_USER_NAME_KEY =
"hadoop.security.service.user.name.key";
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
index f4492e2f235..4cc2c182d53 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileStatus.java
@@ -254,7 +254,7 @@ public void setSymlink(final Path p) {
// Writable
//////////////////////////////////////////////////
public void write(DataOutput out) throws IOException {
- Text.writeString(out, getPath().toString());
+ Text.writeString(out, getPath().toString(), Text.ONE_MEGABYTE);
out.writeLong(getLen());
out.writeBoolean(isDirectory());
out.writeShort(getReplication());
@@ -262,16 +262,16 @@ public void write(DataOutput out) throws IOException {
out.writeLong(getModificationTime());
out.writeLong(getAccessTime());
getPermission().write(out);
- Text.writeString(out, getOwner());
- Text.writeString(out, getGroup());
+ Text.writeString(out, getOwner(), Text.ONE_MEGABYTE);
+ Text.writeString(out, getGroup(), Text.ONE_MEGABYTE);
out.writeBoolean(isSymlink());
if (isSymlink()) {
- Text.writeString(out, getSymlink().toString());
+ Text.writeString(out, getSymlink().toString(), Text.ONE_MEGABYTE);
}
}
public void readFields(DataInput in) throws IOException {
- String strPath = Text.readString(in);
+ String strPath = Text.readString(in, Text.ONE_MEGABYTE);
this.path = new Path(strPath);
this.length = in.readLong();
this.isdir = in.readBoolean();
@@ -280,10 +280,10 @@ public void readFields(DataInput in) throws IOException {
modification_time = in.readLong();
access_time = in.readLong();
permission.readFields(in);
- owner = Text.readString(in);
- group = Text.readString(in);
+ owner = Text.readString(in, Text.ONE_MEGABYTE);
+ group = Text.readString(in, Text.ONE_MEGABYTE);
if (in.readBoolean()) {
- this.symlink = new Path(Text.readString(in));
+ this.symlink = new Path(Text.readString(in, Text.ONE_MEGABYTE));
} else {
this.symlink = null;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
index 9229b84e8ff..b8879a29d59 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java
@@ -199,7 +199,7 @@ public void initialize(URI name, Configuration conf) throws IOException {
* @return the protocol scheme for the FileSystem.
*/
public String getScheme() {
- throw new UnsupportedOperationException("Not implemented by the FileSystem implementation");
+ throw new UnsupportedOperationException("Not implemented by the " + getClass().getSimpleName() + " FileSystem implementation");
}
/** Returns a URI whose scheme and authority identify this FileSystem.*/
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
index 1794c3d032f..6cbaf591e5a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFileSystem.java
@@ -53,7 +53,7 @@
public class FilterFileSystem extends FileSystem {
protected FileSystem fs;
- private String swapScheme;
+ protected String swapScheme;
/*
* so that extending classes can define it
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
index ac9b25d972a..394c01f7054 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalFileSystem.java
@@ -39,6 +39,17 @@ public class LocalFileSystem extends ChecksumFileSystem {
public LocalFileSystem() {
this(new RawLocalFileSystem());
}
+
+ @Override
+ public void initialize(URI name, Configuration conf) throws IOException {
+ if (fs.getConf() == null) {
+ fs.initialize(name, conf);
+ }
+ String scheme = name.getScheme();
+ if (!scheme.equals(fs.getUri().getScheme())) {
+ swapScheme = scheme;
+ }
+ }
/**
* Return the protocol scheme for the FileSystem.
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
index 2fbed2a2bb2..3d193dfad28 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java
@@ -223,6 +223,13 @@ public boolean isAbsolute() {
return isUriPathAbsolute();
}
+ /**
+ * @return true if and only if this path represents the root of a file system
+ */
+ public boolean isRoot() {
+ return getParent() == null;
+ }
+
/** Returns the final component of this path.*/
public String getName() {
String path = uri.getPath();
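
For context, a minimal sketch (not part of this patch) of how the new Path.isRoot() helper behaves; the class name PathIsRootSketch is hypothetical and it only assumes hadoop-common with this change on the classpath. The ChRootedFileSystem and ChRootedFs hunks later in this patch rely on this to avoid prepending the chroot path string when the mount root is "/".

import org.apache.hadoop.fs.Path;

public class PathIsRootSketch {
  public static void main(String[] args) {
    Path root = new Path("hdfs://nn1:8020/");        // root of a file system
    Path file = new Path("hdfs://nn1:8020/user/x");
    // getParent() is null only for the root, so isRoot() is true for it.
    System.out.println(root.isRoot());   // true
    System.out.println(file.isRoot());   // false
  }
}
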
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
index a26d2f422a9..5642d0f5b92 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/permission/PermissionStatus.java
@@ -84,8 +84,8 @@ public PermissionStatus applyUMask(FsPermission umask) {
/** {@inheritDoc} */
public void readFields(DataInput in) throws IOException {
- username = Text.readString(in);
- groupname = Text.readString(in);
+ username = Text.readString(in, Text.ONE_MEGABYTE);
+ groupname = Text.readString(in, Text.ONE_MEGABYTE);
permission = FsPermission.read(in);
}
@@ -110,8 +110,8 @@ public static void write(DataOutput out,
String username,
String groupname,
FsPermission permission) throws IOException {
- Text.writeString(out, username);
- Text.writeString(out, groupname);
+ Text.writeString(out, username, Text.ONE_MEGABYTE);
+ Text.writeString(out, groupname, Text.ONE_MEGABYTE);
permission.write(out);
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
index 8a05a55310e..59358632a77 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java
@@ -34,7 +34,6 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.compress.CompressionCodec;
import org.apache.hadoop.io.compress.CompressionCodecFactory;
import org.apache.hadoop.util.ReflectionUtils;
@@ -136,7 +135,7 @@ protected InputStream getInputStream(PathData item) throws IOException {
protected class TextRecordInputStream extends InputStream {
SequenceFile.Reader r;
- WritableComparable<?> key;
+ Writable key;
Writable val;
DataInputBuffer inbuf;
@@ -148,7 +147,7 @@ public TextRecordInputStream(FileStatus f) throws IOException {
r = new SequenceFile.Reader(lconf,
SequenceFile.Reader.file(fpath));
key = ReflectionUtils.newInstance(
- r.getKeyClass().asSubclass(WritableComparable.class), lconf);
+ r.getKeyClass().asSubclass(Writable.class), lconf);
val = ReflectionUtils.newInstance(
r.getValueClass().asSubclass(Writable.class), lconf);
inbuf = new DataInputBuffer();
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
index 209fd216d14..85426fa4fff 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFileSystem.java
@@ -75,7 +75,8 @@ protected FileSystem getMyFs() {
protected Path fullPath(final Path path) {
super.checkPath(path);
return path.isAbsolute() ?
- new Path(chRootPathPartString + path.toUri().getPath()) :
+ new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+ + path.toUri().getPath()) :
new Path(chRootPathPartString + workingDir.toUri().getPath(), path);
}
@@ -127,7 +128,7 @@ String stripOutRoot(final Path p) throws IOException {
}
String pathPart = p.toUri().getPath();
return (pathPart.length() == chRootPathPartString.length()) ? "" : pathPart
- .substring(chRootPathPartString.length() + 1);
+ .substring(chRootPathPartString.length() + (chRootPathPart.isRoot() ? 0 : 1));
}
@Override
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
index 063d0d04fa9..f6e27d28151 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/viewfs/ChRootedFs.java
@@ -79,7 +79,8 @@ protected AbstractFileSystem getMyFs() {
*/
protected Path fullPath(final Path path) {
super.checkPath(path);
- return new Path(chRootPathPartString + path.toUri().getPath());
+ return new Path((chRootPathPart.isRoot() ? "" : chRootPathPartString)
+ + path.toUri().getPath());
}
public ChRootedFs(final AbstractFileSystem fs, final Path theRoot)
@@ -127,7 +128,8 @@ public String stripOutRoot(final Path p) {
}
String pathPart = p.toUri().getPath();
return (pathPart.length() == chRootPathPartString.length()) ?
- "" : pathPart.substring(chRootPathPartString.length() + 1);
+ "" : pathPart.substring(chRootPathPartString.length() +
+ (chRootPathPart.isRoot() ? 0 : 1));
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
index 6a2c9fa360c..be4f26fbf2b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
@@ -52,8 +52,6 @@
import org.apache.hadoop.jmx.JMXJsonServlet;
import org.apache.hadoop.log.LogLevel;
import org.apache.hadoop.metrics.MetricsServlet;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector.MODE;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.ReflectionUtils;
@@ -99,6 +97,7 @@ public class HttpServer implements FilterContainer {
// gets stored.
public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
static final String ADMINS_ACL = "admins.acl";
+ public static final String SPNEGO_FILTER = "SpnegoFilter";
public static final String BIND_ADDRESS = "bind.address";
@@ -237,11 +236,7 @@ public HttpServer(String name, String bindAddress, int port,
webServer.addHandler(webAppContext);
addDefaultApps(contexts, appDir, conf);
-
- defineFilter(webAppContext, "krb5Filter",
- Krb5AndCertsSslSocketConnector.Krb5SslFilter.class.getName(),
- null, null);
-
+
addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
final FilterInitializer[] initializers = getFilterInitializers(conf);
if (initializers != null) {
@@ -424,12 +419,13 @@ public void addInternalServlet(String name, String pathSpec,
* protect with Kerberos authentication.
* Note: This method is to be used for adding servlets that facilitate
* internal communication and not for user facing functionality. For
- * servlets added using this method, filters (except internal Kerberized
+ * servlets added using this method, filters (except internal Kerberos
* filters) are not enabled.
*
* @param name The name of the servlet (can be passed as null)
* @param pathSpec The path spec for the servlet
* @param clazz The servlet class
+ * @param requireAuth Require Kerberos authentication to access the servlet
*/
public void addInternalServlet(String name, String pathSpec,
Class<? extends HttpServlet> clazz, boolean requireAuth) {
@@ -440,11 +436,11 @@ public void addInternalServlet(String name, String pathSpec,
webAppContext.addServlet(holder, pathSpec);
if(requireAuth && UserGroupInformation.isSecurityEnabled()) {
- LOG.info("Adding Kerberos filter to " + name);
+ LOG.info("Adding Kerberos (SPNEGO) filter to " + name);
ServletHandler handler = webAppContext.getServletHandler();
FilterMapping fmap = new FilterMapping();
fmap.setPathSpec(pathSpec);
- fmap.setFilterName("krb5Filter");
+ fmap.setFilterName(SPNEGO_FILTER);
fmap.setDispatches(Handler.ALL);
handler.addFilterMapping(fmap);
}
@@ -580,26 +576,14 @@ public void addSslListener(InetSocketAddress addr, String keystore,
webServer.addConnector(sslListener);
}
- /**
- * Configure an ssl listener on the server.
- * @param addr address to listen on
- * @param sslConf conf to retrieve ssl options
- * @param needClientAuth whether client authentication is required
- */
- public void addSslListener(InetSocketAddress addr, Configuration sslConf,
- boolean needClientAuth) throws IOException {
- addSslListener(addr, sslConf, needClientAuth, false);
- }
-
/**
* Configure an ssl listener on the server.
* @param addr address to listen on
* @param sslConf conf to retrieve ssl options
* @param needCertsAuth whether x509 certificate authentication is required
- * @param needKrbAuth whether to allow kerberos auth
*/
public void addSslListener(InetSocketAddress addr, Configuration sslConf,
- boolean needCertsAuth, boolean needKrbAuth) throws IOException {
+ boolean needCertsAuth) throws IOException {
if (webServer.isStarted()) {
throw new IOException("Failed to add ssl listener");
}
@@ -612,15 +596,7 @@ public void addSslListener(InetSocketAddress addr, Configuration sslConf,
System.setProperty("javax.net.ssl.trustStoreType", sslConf.get(
"ssl.server.truststore.type", "jks"));
}
- Krb5AndCertsSslSocketConnector.MODE mode;
- if(needCertsAuth && needKrbAuth)
- mode = MODE.BOTH;
- else if (!needCertsAuth && needKrbAuth)
- mode = MODE.KRB;
- else // Default to certificates
- mode = MODE.CERTS;
-
- SslSocketConnector sslListener = new Krb5AndCertsSslSocketConnector(mode);
+ SslSocketConnector sslListener = new SslSocketConnector();
sslListener.setHost(addr.getHostName());
sslListener.setPort(addr.getPort());
sslListener.setKeystore(sslConf.get("ssl.server.keystore.location"));
@@ -779,6 +755,37 @@ public String toString() {
: "Inactive HttpServer";
}
+ /**
+ * Checks the user has privileges to access to instrumentation servlets.
+ *
+ * If hadoop.security.instrumentation.requires.admin is set to FALSE
+ * (default value) it always returns TRUE.
+ *
+ * If hadoop.security.instrumentation.requires.admin is set to TRUE
+ * it will check if the current user is in the admin ACLs. If the user is
+ * in the admin ACLs it returns TRUE, otherwise it returns FALSE.
+ *
+ * @param servletContext the servlet context.
+ * @param request the servlet request.
+ * @param response the servlet response.
+ * @return TRUE/FALSE based on the logic described above.
+ */
+ public static boolean isInstrumentationAccessAllowed(
+ ServletContext servletContext, HttpServletRequest request,
+ HttpServletResponse response) throws IOException {
+ Configuration conf =
+ (Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
+
+ boolean access = true;
+ boolean adminAccess = conf.getBoolean(
+ CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+ false);
+ if (adminAccess) {
+ access = hasAdministratorAccess(servletContext, request, response);
+ }
+ return access;
+ }
+
/**
* Does the user sending the HttpServletRequest has the administrator ACLs? If
* it isn't the case, response will be modified to send an error to the user.
@@ -794,7 +801,6 @@ public static boolean hasAdministratorAccess(
HttpServletResponse response) throws IOException {
Configuration conf =
(Configuration) servletContext.getAttribute(CONF_CONTEXT_ATTRIBUTE);
-
// If there is no authorization, anybody has administrator access.
if (!conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
@@ -834,12 +840,11 @@ public static class StackServlet extends HttpServlet {
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
- response.setContentType("text/plain; charset=UTF-8");
- // Do the authorization
- if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
- response)) {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+ request, response)) {
return;
}
+ response.setContentType("text/plain; charset=UTF-8");
PrintWriter out = response.getWriter();
ReflectionUtils.printThreadInfo(out, "");
out.close();
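
As an aside, a minimal sketch (not part of this patch) of how a servlet might use the new isInstrumentationAccessAllowed() hook together with the new hadoop.security.instrumentation.requires.admin key; the class name MyInstrumentationServlet is hypothetical, and the pattern simply mirrors what the patch applies to ConfServlet, JMXJsonServlet, MetricsServlet and StackServlet.

import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.http.HttpServer;

public class MyInstrumentationServlet extends HttpServlet {
  @Override
  public void doGet(HttpServletRequest request, HttpServletResponse response)
      throws ServletException, IOException {
    // Enforces the admin ACL only when
    // hadoop.security.instrumentation.requires.admin is set to true;
    // with the default (false) every caller is allowed through.
    if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
        request, response)) {
      return;  // the access check has already written the error response
    }
    response.setContentType("text/plain; charset=UTF-8");
    response.getWriter().println("instrumentation data would go here");
  }
}
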
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
index 5c52883ebf5..0bee33236d6 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
@@ -239,7 +239,6 @@ public void append(byte[] utf8, int start, int len) {
*/
public void clear() {
length = 0;
- bytes = EMPTY_BYTES;
}
/*
@@ -413,6 +412,8 @@ public static ByteBuffer encode(String string, boolean replace)
return bytes;
}
+ static final public int ONE_MEGABYTE = 1024 * 1024;
+
/** Read a UTF8 encoded string from in
*/
public static String readString(DataInput in) throws IOException {
@@ -421,7 +422,17 @@ public static String readString(DataInput in) throws IOException {
in.readFully(bytes, 0, length);
return decode(bytes);
}
-
+
+ /** Read a UTF8 encoded string with a maximum size
+ */
+ public static String readString(DataInput in, int maxLength)
+ throws IOException {
+ int length = WritableUtils.readVIntInRange(in, 0, maxLength - 1);
+ byte [] bytes = new byte[length];
+ in.readFully(bytes, 0, length);
+ return decode(bytes);
+ }
+
/** Write a UTF8 encoded string to out
*/
public static int writeString(DataOutput out, String s) throws IOException {
@@ -432,6 +443,22 @@ public static int writeString(DataOutput out, String s) throws IOException {
return length;
}
+ /** Write a UTF8 encoded string with a maximum size to out
+ */
+ public static int writeString(DataOutput out, String s, int maxLength)
+ throws IOException {
+ ByteBuffer bytes = encode(s);
+ int length = bytes.limit();
+ if (length >= maxLength) {
+ throw new IOException("string was too long to write! Expected " +
+ "less than " + maxLength + " bytes, but got " +
+ length + " bytes.");
+ }
+ WritableUtils.writeVInt(out, length);
+ out.write(bytes.array(), 0, length);
+ return length;
+ }
+
////// states for validateUTF8
private static final int LEAD_BYTE = 0;
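
For reference, a minimal sketch (not part of this patch) of the bounded string helpers added above; the class name BoundedTextSketch is hypothetical and it only assumes hadoop-common with this change on the classpath.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import org.apache.hadoop.io.Text;

public class BoundedTextSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    // Throws IOException if the encoded string is ONE_MEGABYTE bytes or longer.
    Text.writeString(out, "hdfs://namenode:8020/user/alice", Text.ONE_MEGABYTE);

    DataInputStream in =
        new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    // Reads the vint length but rejects anything >= ONE_MEGABYTE, which is the
    // guard FileStatus and PermissionStatus now get against oversized input.
    System.out.println(Text.readString(in, Text.ONE_MEGABYTE));
  }
}
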
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
index 083141311b5..ef32cfde3a9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
@@ -53,6 +53,8 @@
import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcPayloadOperationProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
+import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableUtils;
@@ -845,24 +847,24 @@ private void receiveResponse() {
touch();
try {
- int id = in.readInt(); // try to read an id
-
+ RpcResponseHeaderProto response =
+ RpcResponseHeaderProto.parseDelimitedFrom(in);
+ int callId = response.getCallId();
if (LOG.isDebugEnabled())
- LOG.debug(getName() + " got value #" + id);
+ LOG.debug(getName() + " got value #" + callId);
- Call call = calls.get(id);
-
- int state = in.readInt(); // read call status
- if (state == Status.SUCCESS.state) {
+ Call call = calls.get(callId);
+ RpcStatusProto status = response.getStatus();
+ if (status == RpcStatusProto.SUCCESS) {
Writable value = ReflectionUtils.newInstance(valueClass, conf);
value.readFields(in); // read value
call.setRpcResponse(value);
- calls.remove(id);
- } else if (state == Status.ERROR.state) {
+ calls.remove(callId);
+ } else if (status == RpcStatusProto.ERROR) {
call.setException(new RemoteException(WritableUtils.readString(in),
WritableUtils.readString(in)));
- calls.remove(id);
- } else if (state == Status.FATAL.state) {
+ calls.remove(callId);
+ } else if (status == RpcStatusProto.FATAL) {
// Close the connection
markClosed(new RemoteException(WritableUtils.readString(in),
WritableUtils.readString(in)));
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
index 69900421fa2..3173ad07ba0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
@@ -217,7 +217,7 @@ public static RpcInvoker getRpcInvoker(RPC.RpcKind rpcKind) {
public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+Server.class.getName());
private static final String AUTH_FAILED_FOR = "Auth failed for ";
- private static final String AUTH_SUCCESSFULL_FOR = "Auth successfull for ";
+ private static final String AUTH_SUCCESSFUL_FOR = "Auth successful for ";
private static final ThreadLocal<Server> SERVER = new ThreadLocal<Server>();
@@ -1234,7 +1234,7 @@ public Object run() throws SaslException {
LOG.debug("SASL server successfully authenticated client: " + user);
}
rpcMetrics.incrAuthenticationSuccesses();
- AUDITLOG.info(AUTH_SUCCESSFULL_FOR + user);
+ AUDITLOG.info(AUTH_SUCCESSFUL_FOR + user);
saslContextEstablished = true;
}
} else {
@@ -1339,7 +1339,7 @@ public int readAndProcess() throws IOException, InterruptedException {
+ CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION
+ ") is configured as simple. Please configure another method "
+ "like kerberos or digest.");
- setupResponse(authFailedResponse, authFailedCall, Status.FATAL,
+ setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL,
null, ae.getClass().getName(), ae.getMessage());
responder.doRespond(authFailedCall);
throw ae;
@@ -1420,7 +1420,7 @@ private void setupBadVersionResponse(int clientVersion) throws IOException {
Call fakeCall = new Call(-1, null, this);
// Versions 3 and greater can interpret this exception
// response in the same manner
- setupResponse(buffer, fakeCall, Status.FATAL,
+ setupResponseOldVersionFatal(buffer, fakeCall,
null, VersionMismatch.class.getName(), errMsg);
responder.doRespond(fakeCall);
@@ -1443,7 +1443,7 @@ private void respondUnsupportedSerialization(IpcSerializationType st) throws IOE
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
Call fakeCall = new Call(-1, null, this);
- setupResponse(buffer, fakeCall, Status.FATAL, null,
+ setupResponse(buffer, fakeCall, RpcStatusProto.FATAL, null,
IpcException.class.getName(), errMsg);
responder.doRespond(fakeCall);
}
@@ -1579,7 +1579,7 @@ private void processData(byte[] buf) throws IOException, InterruptedException {
new Call(header.getCallId(), null, this);
ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
- setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+ setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
IOException.class.getName(),
"Unknown rpc kind " + header.getRpcKind());
responder.doRespond(readParamsFailedCall);
@@ -1597,7 +1597,7 @@ private void processData(byte[] buf) throws IOException, InterruptedException {
new Call(header.getCallId(), null, this);
ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
- setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
+ setupResponse(responseBuffer, readParamsFailedCall, RpcStatusProto.FATAL, null,
t.getClass().getName(),
"IPC server unable to read call parameters: " + t.getMessage());
responder.doRespond(readParamsFailedCall);
@@ -1627,7 +1627,7 @@ private boolean authorizeConnection() throws IOException {
rpcMetrics.incrAuthorizationSuccesses();
} catch (AuthorizationException ae) {
rpcMetrics.incrAuthorizationFailures();
- setupResponse(authFailedResponse, authFailedCall, Status.FATAL, null,
+ setupResponse(authFailedResponse, authFailedCall, RpcStatusProto.FATAL, null,
ae.getClass().getName(), ae.getMessage());
responder.doRespond(authFailedCall);
return false;
@@ -1725,8 +1725,8 @@ public Writable run() throws Exception {
// responder.doResponse() since setupResponse may use
// SASL to encrypt response data and SASL enforces
// its own message ordering.
- setupResponse(buf, call, (error == null) ? Status.SUCCESS
- : Status.ERROR, value, errorClass, error);
+ setupResponse(buf, call, (error == null) ? RpcStatusProto.SUCCESS
+ : RpcStatusProto.ERROR, value, errorClass, error);
// Discard the large buf and reset it back to smaller size
// to free up heap
@@ -1859,40 +1859,79 @@ private void closeConnection(Connection connection) {
/**
* Setup response for the IPC Call.
*
- * @param response buffer to serialize the response into
+ * @param responseBuf buffer to serialize the response into
* @param call {@link Call} to which we are setting up the response
- * @param status {@link Status} of the IPC call
+ * @param status of the IPC call
* @param rv return value for the IPC Call, if the call was successful
* @param errorClass error class, if the the call failed
* @param error error message, if the call failed
* @throws IOException
*/
- private void setupResponse(ByteArrayOutputStream response,
- Call call, Status status,
+ private void setupResponse(ByteArrayOutputStream responseBuf,
+ Call call, RpcStatusProto status,
Writable rv, String errorClass, String error)
throws IOException {
- response.reset();
- DataOutputStream out = new DataOutputStream(response);
- out.writeInt(call.callId); // write call id
- out.writeInt(status.state); // write status
+ responseBuf.reset();
+ DataOutputStream out = new DataOutputStream(responseBuf);
+ RpcResponseHeaderProto.Builder response =
+ RpcResponseHeaderProto.newBuilder();
+ response.setCallId(call.callId);
+ response.setStatus(status);
- if (status == Status.SUCCESS) {
+
+ if (status == RpcStatusProto.SUCCESS) {
try {
+ response.build().writeDelimitedTo(out);
rv.write(out);
} catch (Throwable t) {
LOG.warn("Error serializing call response for call " + call, t);
// Call back to same function - this is OK since the
// buffer is reset at the top, and since status is changed
// to ERROR it won't infinite loop.
- setupResponse(response, call, Status.ERROR,
+ setupResponse(responseBuf, call, RpcStatusProto.ERROR,
null, t.getClass().getName(),
StringUtils.stringifyException(t));
return;
}
} else {
+ if (status == RpcStatusProto.FATAL) {
+ response.setServerIpcVersionNum(Server.CURRENT_VERSION);
+ }
+ response.build().writeDelimitedTo(out);
WritableUtils.writeString(out, errorClass);
WritableUtils.writeString(out, error);
}
+ if (call.connection.useWrap) {
+ wrapWithSasl(responseBuf, call);
+ }
+ call.setResponse(ByteBuffer.wrap(responseBuf.toByteArray()));
+ }
+
+ /**
+ * Setup response for the IPC Call on Fatal Error from a
+ * client that is using old version of Hadoop.
+ * The response is serialized using the previous protocol's response
+ * layout.
+ *
+ * @param response buffer to serialize the response into
+ * @param call {@link Call} to which we are setting up the response
+ * @param rv return value for the IPC Call, if the call was successful
+ * @param errorClass error class, if the call failed
+ * @param error error message, if the call failed
+ * @throws IOException
+ */
+ private void setupResponseOldVersionFatal(ByteArrayOutputStream response,
+ Call call,
+ Writable rv, String errorClass, String error)
+ throws IOException {
+ final int OLD_VERSION_FATAL_STATUS = -1;
+ response.reset();
+ DataOutputStream out = new DataOutputStream(response);
+ out.writeInt(call.callId); // write call id
+ out.writeInt(OLD_VERSION_FATAL_STATUS); // write FATAL_STATUS
+ WritableUtils.writeString(out, errorClass);
+ WritableUtils.writeString(out, error);
+
if (call.connection.useWrap) {
wrapWithSasl(response, call);
}
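
For context, a minimal sketch (not part of this patch) of the length-delimited framing the new response path uses, assuming the RpcResponseHeaderProto and RpcStatusProto classes generated from RpcPayloadHeaderProtos are on the classpath; the class name DelimitedHeaderSketch is hypothetical.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;

public class DelimitedHeaderSketch {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    // Server side (setupResponse): a varint length prefix followed by the
    // header; the call's return value is written after it on success.
    RpcResponseHeaderProto.newBuilder()
        .setCallId(42)
        .setStatus(RpcStatusProto.SUCCESS)
        .build()
        .writeDelimitedTo(buf);

    // Client side (receiveResponse): consume exactly one delimited header,
    // leaving the stream positioned at the response value that follows.
    ByteArrayInputStream in = new ByteArrayInputStream(buf.toByteArray());
    RpcResponseHeaderProto header = RpcResponseHeaderProto.parseDelimitedFrom(in);
    System.out.println(header.getCallId() + " " + header.getStatus());
  }
}
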
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java
deleted file mode 100644
index 16fd871ffa6..00000000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Status.java
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-/**
- * Status of a Hadoop IPC call.
- */
-enum Status {
- SUCCESS (0),
- ERROR (1),
- FATAL (-1);
-
- int state;
- private Status(int state) {
- this.state = state;
- }
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
index cc46aacd22b..8dc83a3c716 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/jmx/JMXJsonServlet.java
@@ -148,9 +148,8 @@ public void init() throws ServletException {
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response) {
try {
- // Do the authorization
- if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
- response)) {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+ request, response)) {
return;
}
JsonGenerator jg = null;
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
index 92c342108d1..af469f9a34d 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics/MetricsServlet.java
@@ -106,9 +106,8 @@ Map<String, Map<String, List<TagsMetricsPair>>> makeMap(
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
- // Do the authorization
- if (!HttpServer.hasAdministratorAccess(getServletContext(), request,
- response)) {
+ if (!HttpServer.isInstrumentationAccessAllowed(getServletContext(),
+ request, response)) {
return;
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
index d6bf5d92c3d..ac48a08da72 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
@@ -140,7 +140,7 @@ public static SocketFactory getSocketFactoryFromProperty(
/**
* Util method to build socket addr from either:
- * :
+ * :
* ://:/
*/
public static InetSocketAddress createSocketAddr(String target) {
@@ -150,7 +150,7 @@ public static InetSocketAddress createSocketAddr(String target) {
/**
* Util method to build socket addr from either:
*
- * :
+ * :
* ://:/
*/
public static InetSocketAddress createSocketAddr(String target,
@@ -375,53 +375,44 @@ public static InetSocketAddress getConnectAddress(InetSocketAddress addr) {
}
/**
- * Same as getInputStream(socket, socket.getSoTimeout()).
+ * Same as getInputStream(socket, socket.getSoTimeout()).
+ *
*
- * From documentation for {@link #getInputStream(Socket, long)}:
- * Returns InputStream for the socket. If the socket has an associated
- * SocketChannel then it returns a
- * {@link SocketInputStream} with the given timeout. If the socket does not
- * have a channel, {@link Socket#getInputStream()} is returned. In the later
- * case, the timeout argument is ignored and the timeout set with
- * {@link Socket#setSoTimeout(int)} applies for reads.
- *
- * Any socket created using socket factories returned by {@link NetUtils},
- * must use this interface instead of {@link Socket#getInputStream()}.
- *
* @see #getInputStream(Socket, long)
- *
- * @param socket
- * @return InputStream for reading from the socket.
- * @throws IOException
*/
- public static InputStream getInputStream(Socket socket)
+ public static SocketInputWrapper getInputStream(Socket socket)
throws IOException {
return getInputStream(socket, socket.getSoTimeout());
}
-
+
/**
- * Returns InputStream for the socket. If the socket has an associated
- * SocketChannel then it returns a
- * {@link SocketInputStream} with the given timeout. If the socket does not
- * have a channel, {@link Socket#getInputStream()} is returned. In the later
- * case, the timeout argument is ignored and the timeout set with
- * {@link Socket#setSoTimeout(int)} applies for reads.
+ * Return a {@link SocketInputWrapper} for the socket and set the given
+ * timeout. If the socket does not have an associated channel, then its socket
+ * timeout will be set to the specified value. Otherwise, a
+ * {@link SocketInputStream} will be created which reads with the configured
+ * timeout.
*
- * Any socket created using socket factories returned by {@link NetUtils},
+ * Any socket created using socket factories returned by {@link NetUtils},
* must use this interface instead of {@link Socket#getInputStream()}.
- *
+ *
+ * In general, this should be called only once on each socket: see the note
+ * in {@link SocketInputWrapper#setTimeout(long)} for more information.
+ *
* @see Socket#getChannel()
*
* @param socket
- * @param timeout timeout in milliseconds. This may not always apply. zero
- * for waiting as long as necessary.
- * @return InputStream for reading from the socket.
+ * @param timeout timeout in milliseconds. zero for waiting as
+ * long as necessary.
+ * @return SocketInputWrapper for reading from the socket.
* @throws IOException
*/
- public static InputStream getInputStream(Socket socket, long timeout)
+ public static SocketInputWrapper getInputStream(Socket socket, long timeout)
throws IOException {
- return (socket.getChannel() == null) ?
- socket.getInputStream() : new SocketInputStream(socket, timeout);
+ InputStream stm = (socket.getChannel() == null) ?
+ socket.getInputStream() : new SocketInputStream(socket);
+ SocketInputWrapper w = new SocketInputWrapper(socket, stm);
+ w.setTimeout(timeout);
+ return w;
}
/**
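For illustration only (not part of the patch): a minimal sketch of how a caller might use the new return type, assuming a connected Socket obtained elsewhere and arbitrary timeout values.

    import java.io.IOException;
    import java.net.Socket;

    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.net.SocketInputWrapper;

    public class ReadWithTimeoutSketch {
      static int readSome(Socket socket, byte[] buf) throws IOException {
        // The returned wrapper already has the 10 second timeout applied.
        SocketInputWrapper in = NetUtils.getInputStream(socket, 10000);
        int n = in.read(buf);
        // The timeout can be adjusted later without re-wrapping the socket.
        in.setTimeout(30000);
        return n;
      }
    }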
@@ -503,7 +494,7 @@ public static void connect(Socket socket,
* also takes a local address and port to bind the socket to.
*
* @param socket
- * @param address the remote address
+ * @param endpoint the remote address
* @param localAddr the local address to bind the socket to
* @param timeout timeout in milliseconds
*/
@@ -558,16 +549,11 @@ public static void connect(Socket socket,
* @return its IP address in the string format
*/
public static String normalizeHostName(String name) {
- if (Character.digit(name.charAt(0), 10) != -1) { // it is an IP
+ try {
+ return InetAddress.getByName(name).getHostAddress();
+ } catch (UnknownHostException e) {
return name;
- } else {
- try {
- InetAddress ipAddress = InetAddress.getByName(name);
- return ipAddress.getHostAddress();
- } catch (UnknownHostException e) {
- return name;
- }
- }
+ }
}
/**
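For illustration only (not part of the patch): a sketch mirroring the simplified normalizeHostName() logic; the host names are assumptions.

    import java.net.InetAddress;
    import java.net.UnknownHostException;

    public class NormalizeHostNameSketch {
      // Resolve to an IP string, falling back to the input when resolution fails.
      static String normalize(String name) {
        try {
          return InetAddress.getByName(name).getHostAddress();
        } catch (UnknownHostException e) {
          return name;   // unresolved names are returned unchanged
        }
      }

      public static void main(String[] args) {
        System.out.println(normalize("localhost"));      // e.g. 127.0.0.1
        System.out.println(normalize("no.such.host."));  // printed back verbatim
      }
    }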
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
index e51602ff058..18874ecf91b 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketIOWithTimeout.java
@@ -247,6 +247,10 @@ void waitForIO(int ops) throws IOException {
ops));
}
}
+
+ public void setTimeout(long timeoutMs) {
+ this.timeout = timeoutMs;
+ }
private static String timeoutExceptionString(SelectableChannel channel,
long timeout, int ops) {
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java
index ef8c02b7dda..a0b0c3ed0f9 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputStream.java
@@ -28,9 +28,6 @@
import java.nio.channels.SelectableChannel;
import java.nio.channels.SelectionKey;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
/**
* This implements an input stream that can have a timeout while reading.
* This sets non-blocking flag on the socket channel.
@@ -40,9 +37,7 @@
* IllegalBlockingModeException.
* Please use {@link SocketOutputStream} for writing.
*/
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Unstable
-public class SocketInputStream extends InputStream
+class SocketInputStream extends InputStream
implements ReadableByteChannel {
private Reader reader;
@@ -171,4 +166,8 @@ public int read(ByteBuffer dst) throws IOException {
public void waitForReadable() throws IOException {
reader.waitForIO(SelectionKey.OP_READ);
}
+
+ public void setTimeout(long timeoutMs) {
+ reader.setTimeout(timeoutMs);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java
new file mode 100644
index 00000000000..f5cbe17519d
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/SocketInputWrapper.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.net;
+
+import java.io.FilterInputStream;
+
+import java.io.InputStream;
+import java.net.Socket;
+import java.net.SocketException;
+import java.nio.channels.ReadableByteChannel;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * A wrapper stream around a socket which allows setting of its timeout. If the
+ * socket has a channel, this uses non-blocking IO via the package-private
+ * {@link SocketInputStream} implementation. Otherwise, timeouts are managed by
+ * setting the underlying socket timeout itself.
+ */
+@InterfaceAudience.LimitedPrivate("HDFS")
+@InterfaceStability.Unstable
+public class SocketInputWrapper extends FilterInputStream {
+ private final Socket socket;
+ private final boolean hasChannel;
+
+ SocketInputWrapper(Socket s, InputStream is) {
+ super(is);
+ this.socket = s;
+ this.hasChannel = s.getChannel() != null;
+ if (hasChannel) {
+ Preconditions.checkArgument(is instanceof SocketInputStream,
+ "Expected a SocketInputStream when there is a channel. " +
+ "Got: %s", is);
+ }
+ }
+
+ /**
+ * Set the timeout for reads from this stream.
+ *
+ * Note: the behavior here can differ subtly depending on whether the
+ * underlying socket has an associated Channel. In particular, if there is no
+ * channel, then this call will affect the socket timeout for all
+ * readers of this socket. If there is a channel, then this call will affect
+ * the timeout only for this stream. As such, it is recommended to
+ * only create one {@link SocketInputWrapper} instance per socket.
+ *
+ * @param timeoutMs
+ * the new timeout, 0 for no timeout
+ * @throws SocketException
+ * if the timeout cannot be set
+ */
+ public void setTimeout(long timeoutMs) throws SocketException {
+ if (hasChannel) {
+ ((SocketInputStream)in).setTimeout(timeoutMs);
+ } else {
+ socket.setSoTimeout((int)timeoutMs);
+ }
+ }
+
+ /**
+ * @return an underlying ReadableByteChannel implementation.
+ * @throws IllegalStateException if this socket does not have a channel
+ */
+ public ReadableByteChannel getReadableByteChannel() {
+ Preconditions.checkState(hasChannel,
+ "Socket %s does not have a channel",
+ this.socket);
+ return (SocketInputStream)in;
+ }
+}
\ No newline at end of file
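For illustration only (not part of the patch): a sketch of reading through the wrapper's channel view when the socket is channel-backed, with a stream fallback otherwise; variable names and the 5 second timeout are assumptions.

    import java.io.IOException;
    import java.net.Socket;
    import java.nio.ByteBuffer;
    import java.nio.channels.ReadableByteChannel;

    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.net.SocketInputWrapper;

    public class ChannelReadSketch {
      static int readInto(Socket socket, ByteBuffer dst) throws IOException {
        // One wrapper per socket, as recommended in the setTimeout() javadoc.
        SocketInputWrapper wrapper = NetUtils.getInputStream(socket, 5000);
        if (socket.getChannel() != null) {
          // Channel-backed sockets expose a ReadableByteChannel view.
          ReadableByteChannel ch = wrapper.getReadableByteChannel();
          return ch.read(dst);
        }
        // Plain sockets fall back to stream reads (assumes an array-backed buffer).
        int n = wrapper.read(dst.array(), dst.arrayOffset() + dst.position(),
            dst.remaining());
        if (n > 0) {
          dst.position(dst.position() + n);
        }
        return n;
      }
    }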
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java
deleted file mode 100644
index 625cad52d35..00000000000
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java
+++ /dev/null
@@ -1,232 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.security;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.net.ServerSocket;
-import java.security.Principal;
-import java.util.Collections;
-import java.util.List;
-import java.util.Random;
-
-import javax.net.ssl.SSLContext;
-import javax.net.ssl.SSLServerSocket;
-import javax.net.ssl.SSLServerSocketFactory;
-import javax.net.ssl.SSLSocket;
-import javax.security.auth.kerberos.KerberosPrincipal;
-import javax.servlet.Filter;
-import javax.servlet.FilterChain;
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletException;
-import javax.servlet.ServletRequest;
-import javax.servlet.ServletResponse;
-import javax.servlet.http.HttpServletRequest;
-import javax.servlet.http.HttpServletRequestWrapper;
-import javax.servlet.http.HttpServletResponse;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.mortbay.io.EndPoint;
-import org.mortbay.jetty.HttpSchemes;
-import org.mortbay.jetty.Request;
-import org.mortbay.jetty.security.ServletSSL;
-import org.mortbay.jetty.security.SslSocketConnector;
-
-/**
- * Extend Jetty's {@link SslSocketConnector} to optionally also provide
- * Kerberos5ized SSL sockets. The only change in behavior from superclass
- * is that we no longer honor requests to turn off NeedAuthentication when
- * running with Kerberos support.
- */
-public class Krb5AndCertsSslSocketConnector extends SslSocketConnector {
- public static final List<String> KRB5_CIPHER_SUITES =
- Collections.unmodifiableList(Collections.singletonList(
- "TLS_KRB5_WITH_3DES_EDE_CBC_SHA"));
- static {
- SecurityUtil.initKrb5CipherSuites();
- }
-
- private static final Log LOG = LogFactory
- .getLog(Krb5AndCertsSslSocketConnector.class);
-
- private static final String REMOTE_PRINCIPAL = "remote_principal";
-
- public enum MODE {KRB, CERTS, BOTH} // Support Kerberos, certificates or both?
-
- private final boolean useKrb;
- private final boolean useCerts;
-
- public Krb5AndCertsSslSocketConnector() {
- super();
- useKrb = true;
- useCerts = false;
-
- setPasswords();
- }
-
- public Krb5AndCertsSslSocketConnector(MODE mode) {
- super();
- useKrb = mode == MODE.KRB || mode == MODE.BOTH;
- useCerts = mode == MODE.CERTS || mode == MODE.BOTH;
- setPasswords();
- logIfDebug("useKerb = " + useKrb + ", useCerts = " + useCerts);
- }
-
- // If not using Certs, set passwords to random gibberish or else
- // Jetty will actually prompt the user for some.
- private void setPasswords() {
- if(!useCerts) {
- Random r = new Random();
- System.setProperty("jetty.ssl.password", String.valueOf(r.nextLong()));
- System.setProperty("jetty.ssl.keypassword", String.valueOf(r.nextLong()));
- }
- }
-
- @Override
- protected SSLServerSocketFactory createFactory() throws Exception {
- if(useCerts)
- return super.createFactory();
-
- SSLContext context = super.getProvider()==null
- ? SSLContext.getInstance(super.getProtocol())
- :SSLContext.getInstance(super.getProtocol(), super.getProvider());
- context.init(null, null, null);
-
- return context.getServerSocketFactory();
- }
-
- /* (non-Javadoc)
- * @see org.mortbay.jetty.security.SslSocketConnector#newServerSocket(java.lang.String, int, int)
- */
- @Override
- protected ServerSocket newServerSocket(String host, int port, int backlog)
- throws IOException {
- logIfDebug("Creating new KrbServerSocket for: " + host);
- SSLServerSocket ss = null;
-
- if(useCerts) // Get the server socket from the SSL super impl
- ss = (SSLServerSocket)super.newServerSocket(host, port, backlog);
- else { // Create a default server socket
- try {
- ss = (SSLServerSocket)(host == null
- ? createFactory().createServerSocket(port, backlog) :
- createFactory().createServerSocket(port, backlog, InetAddress.getByName(host)));
- } catch (Exception e)
- {
- LOG.warn("Could not create KRB5 Listener", e);
- throw new IOException("Could not create KRB5 Listener: " + e.toString());
- }
- }
-
- // Add Kerberos ciphers to this socket server if needed.
- if(useKrb) {
- ss.setNeedClientAuth(true);
- String [] combined;
- if(useCerts) { // combine the cipher suites
- String[] certs = ss.getEnabledCipherSuites();
- combined = new String[certs.length + KRB5_CIPHER_SUITES.size()];
- System.arraycopy(certs, 0, combined, 0, certs.length);
- System.arraycopy(KRB5_CIPHER_SUITES.toArray(new String[0]), 0, combined,
- certs.length, KRB5_CIPHER_SUITES.size());
- } else { // Just enable Kerberos auth
- combined = KRB5_CIPHER_SUITES.toArray(new String[0]);
- }
-
- ss.setEnabledCipherSuites(combined);
- }
-
- return ss;
- };
-
- @Override
- public void customize(EndPoint endpoint, Request request) throws IOException {
- if(useKrb) { // Add Kerberos-specific info
- SSLSocket sslSocket = (SSLSocket)endpoint.getTransport();
- Principal remotePrincipal = sslSocket.getSession().getPeerPrincipal();
- logIfDebug("Remote principal = " + remotePrincipal);
- request.setScheme(HttpSchemes.HTTPS);
- request.setAttribute(REMOTE_PRINCIPAL, remotePrincipal);
-
- if(!useCerts) { // Add extra info that would have been added by super
- String cipherSuite = sslSocket.getSession().getCipherSuite();
- Integer keySize = Integer.valueOf(ServletSSL.deduceKeyLength(cipherSuite));;
-
- request.setAttribute("javax.servlet.request.cipher_suite", cipherSuite);
- request.setAttribute("javax.servlet.request.key_size", keySize);
- }
- }
-
- if(useCerts) super.customize(endpoint, request);
- }
-
- private void logIfDebug(String s) {
- if(LOG.isDebugEnabled())
- LOG.debug(s);
- }
-
- /**
- * Filter that takes the Kerberos principal identified in the
- * {@link Krb5AndCertsSslSocketConnector} and provides it the to the servlet
- * at runtime, setting the principal and short name.
- */
- public static class Krb5SslFilter implements Filter {
- @Override
- public void doFilter(ServletRequest req, ServletResponse resp,
- FilterChain chain) throws IOException, ServletException {
- final Principal princ =
- (Principal)req.getAttribute(Krb5AndCertsSslSocketConnector.REMOTE_PRINCIPAL);
-
- if(princ == null || !(princ instanceof KerberosPrincipal)) {
- // Should never actually get here, since should be rejected at socket
- // level.
- LOG.warn("User not authenticated via kerberos from " + req.getRemoteAddr());
- ((HttpServletResponse)resp).sendError(HttpServletResponse.SC_FORBIDDEN,
- "User not authenticated via Kerberos");
- return;
- }
-
- // Provide principal information for servlet at runtime
- ServletRequest wrapper =
- new HttpServletRequestWrapper((HttpServletRequest) req) {
- @Override
- public Principal getUserPrincipal() {
- return princ;
- }
-
- /*
- * Return the full name of this remote user.
- * @see javax.servlet.http.HttpServletRequestWrapper#getRemoteUser()
- */
- @Override
- public String getRemoteUser() {
- return princ.getName();
- }
- };
-
- chain.doFilter(wrapper, resp);
- }
-
- @Override
- public void init(FilterConfig arg0) throws ServletException {
- /* Nothing to do here */
- }
-
- @Override
- public void destroy() { /* Nothing to do here */ }
- }
-}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
index 63683bf7209..8189cfdb279 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
@@ -17,14 +17,11 @@
package org.apache.hadoop.security;
import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URL;
+import java.net.URLConnection;
import java.net.UnknownHostException;
import java.security.AccessController;
import java.security.PrivilegedAction;
@@ -45,6 +42,8 @@
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
+import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenInfo;
@@ -134,79 +133,6 @@ protected static boolean isOriginalTGT(KerberosTicket ticket) {
return isTGSPrincipal(ticket.getServer());
}
- /**
- * Explicitly pull the service ticket for the specified host. This solves a
- * problem with Java's Kerberos SSL problem where the client cannot
- * authenticate against a cross-realm service. It is necessary for clients
- * making kerberized https requests to call this method on the target URL
- * to ensure that in a cross-realm environment the remote host will be
- * successfully authenticated.
- *
- * This method is internal to Hadoop and should not be used by other
- * applications. This method should not be considered stable or open:
- * it will be removed when the Java behavior is changed.
- *
- * @param remoteHost Target URL the krb-https client will access
- * @throws IOException if the service ticket cannot be retrieved
- */
- public static void fetchServiceTicket(URL remoteHost) throws IOException {
- if(!UserGroupInformation.isSecurityEnabled())
- return;
-
- String serviceName = "host/" + remoteHost.getHost();
- if (LOG.isDebugEnabled())
- LOG.debug("Fetching service ticket for host at: " + serviceName);
- Object serviceCred = null;
- Method credsToTicketMeth;
- Class<?> krb5utilClass;
- try {
- Class<?> principalClass;
- Class<?> credentialsClass;
-
- if (System.getProperty("java.vendor").contains("IBM")) {
- principalClass = Class.forName("com.ibm.security.krb5.PrincipalName");
-
- credentialsClass = Class.forName("com.ibm.security.krb5.Credentials");
- krb5utilClass = Class.forName("com.ibm.security.jgss.mech.krb5");
- } else {
- principalClass = Class.forName("sun.security.krb5.PrincipalName");
- credentialsClass = Class.forName("sun.security.krb5.Credentials");
- krb5utilClass = Class.forName("sun.security.jgss.krb5.Krb5Util");
- }
- @SuppressWarnings("rawtypes")
- Constructor principalConstructor = principalClass.getConstructor(String.class,
- int.class);
- Field KRB_NT_SRV_HST = principalClass.getDeclaredField("KRB_NT_SRV_HST");
- Method acquireServiceCredsMeth =
- credentialsClass.getDeclaredMethod("acquireServiceCreds",
- String.class, credentialsClass);
- Method ticketToCredsMeth = krb5utilClass.getDeclaredMethod("ticketToCreds",
- KerberosTicket.class);
- credsToTicketMeth = krb5utilClass.getDeclaredMethod("credsToTicket",
- credentialsClass);
-
- Object principal = principalConstructor.newInstance(serviceName,
- KRB_NT_SRV_HST.get(principalClass));
-
- serviceCred = acquireServiceCredsMeth.invoke(credentialsClass,
- principal.toString(),
- ticketToCredsMeth.invoke(krb5utilClass, getTgtFromSubject()));
- } catch (Exception e) {
- throw new IOException("Can't get service ticket for: "
- + serviceName, e);
- }
- if (serviceCred == null) {
- throw new IOException("Can't get service ticket for " + serviceName);
- }
- try {
- Subject.getSubject(AccessController.getContext()).getPrivateCredentials()
- .add(credsToTicketMeth.invoke(krb5utilClass, serviceCred));
- } catch (Exception e) {
- throw new IOException("Can't get service ticket for: "
- + serviceName, e);
- }
- }
-
/**
* Convert Kerberos principal name pattern to valid Kerberos principal
* names. It replaces hostname pattern with hostname, which should be
@@ -513,6 +439,30 @@ public static <T> T doAsLoginUserOrFatal(PrivilegedAction<T> action) {
}
}
+ /**
+ * Open a (if need be) secure connection to a URL in a secure environment
+ * that is using SPNEGO to authenticate its URLs. All Namenode and Secondary
+ * Namenode URLs that are protected via SPNEGO should be accessed via this
+ * method.
+ *
+ * @param url to authenticate via SPNEGO.
+ * @return A connection that has been authenticated via SPNEGO
+ * @throws IOException If unable to authenticate via SPNEGO
+ */
+ public static URLConnection openSecureHttpConnection(URL url) throws IOException {
+ if(!UserGroupInformation.isSecurityEnabled()) {
+ return url.openConnection();
+ }
+
+ AuthenticatedURL.Token token = new AuthenticatedURL.Token();
+ try {
+ return new AuthenticatedURL().openConnection(url, token);
+ } catch (AuthenticationException e) {
+ throw new IOException("Exception trying to open authenticated connection to "
+ + url, e);
+ }
+ }
+
/**
* Resolves a host subject to the security requirements determined by
* hadoop.security.token.service.use_ip.
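For illustration only (not part of the patch): a sketch of fetching a SPNEGO-protected servlet through the new helper; the URL is a placeholder.

    import java.io.InputStream;
    import java.net.URL;
    import java.net.URLConnection;

    import org.apache.hadoop.io.IOUtils;
    import org.apache.hadoop.security.SecurityUtil;

    public class SpnegoFetchSketch {
      public static void main(String[] args) throws Exception {
        URL url = new URL("http://namenode.example.com:50070/jmx");
        // Falls back to a plain connection when security is disabled.
        URLConnection conn = SecurityUtil.openSecureHttpConnection(url);
        InputStream in = conn.getInputStream();
        try {
          IOUtils.copyBytes(in, System.out, 4096, false);
        } finally {
          in.close();
        }
      }
    }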
@@ -664,10 +614,4 @@ void setSearchDomains(String ... domains) {
}
}
- public static void initKrb5CipherSuites() {
- if (UserGroupInformation.isSecurityEnabled()) {
- System.setProperty("https.cipherSuites",
- Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
- }
- }
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
index 3a9ad0b5f5b..d17d065bf81 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java
@@ -59,7 +59,7 @@ public class ServiceAuthorizationManager {
public static final Log AUDITLOG =
LogFactory.getLog("SecurityLogger."+ServiceAuthorizationManager.class.getName());
- private static final String AUTHZ_SUCCESSFULL_FOR = "Authorization successfull for ";
+ private static final String AUTHZ_SUCCESSFUL_FOR = "Authorization successful for ";
private static final String AUTHZ_FAILED_FOR = "Authorization failed for ";
@@ -108,7 +108,7 @@ public void authorize(UserGroupInformation user,
" is not authorized for protocol " + protocol +
", expected client Kerberos principal is " + clientPrincipal);
}
- AUDITLOG.info(AUTHZ_SUCCESSFULL_FOR + user + " for protocol="+protocol);
+ AUDITLOG.info(AUTHZ_SUCCESSFUL_FOR + user + " for protocol="+protocol);
}
public synchronized void refresh(Configuration conf,
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
index e95ade860b0..bbddf6fdc78 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/Token.java
@@ -18,10 +18,15 @@
package org.apache.hadoop.security.token;
+import com.google.common.collect.Maps;
+
+import java.io.ByteArrayInputStream;
import java.io.DataInput;
+import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Arrays;
+import java.util.Map;
import java.util.ServiceLoader;
import org.apache.commons.codec.binary.Base64;
@@ -37,6 +42,7 @@
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.io.WritableUtils;
+import org.apache.hadoop.util.ReflectionUtils;
/**
* The client-side form of the token.
@@ -45,6 +51,9 @@
@InterfaceStability.Evolving
public class Token<T extends TokenIdentifier> implements Writable {
public static final Log LOG = LogFactory.getLog(Token.class);
+
+ private static Map<Text, Class<? extends TokenIdentifier>> tokenKindMap;
+
private byte[] identifier;
private byte[] password;
private Text kind;
@@ -100,13 +109,49 @@ public Token(Token other) {
}
/**
- * Get the token identifier
- * @return the token identifier
+ * Get the token identifier's byte representation
+ * @return the token identifier's byte representation
*/
public byte[] getIdentifier() {
return identifier;
}
+ private static synchronized Class<? extends TokenIdentifier>
+ getClassForIdentifier(Text kind) {
+ if (tokenKindMap == null) {
+ tokenKindMap = Maps.newHashMap();
+ for (TokenIdentifier id : ServiceLoader.load(TokenIdentifier.class)) {
+ tokenKindMap.put(id.getKind(), id.getClass());
+ }
+ }
+ Class<? extends TokenIdentifier> cls = tokenKindMap.get(kind);
+ if (cls == null) {
+ LOG.warn("Cannot find class for token kind " + kind);
+ return null;
+ }
+ return cls;
+ }
+
+ /**
+ * Get the token identifier object, or null if it could not be constructed
+ * (because the class could not be loaded, for example).
+ * @return the token identifier, or null
+ * @throws IOException
+ */
+ @SuppressWarnings("unchecked")
+ public T decodeIdentifier() throws IOException {
+ Class<? extends TokenIdentifier> cls = getClassForIdentifier(getKind());
+ if (cls == null) {
+ return null;
+ }
+ TokenIdentifier tokenIdentifier = ReflectionUtils.newInstance(cls, null);
+ ByteArrayInputStream buf = new ByteArrayInputStream(identifier);
+ DataInputStream in = new DataInputStream(buf);
+ tokenIdentifier.readFields(in);
+ in.close();
+ return (T) tokenIdentifier;
+ }
+
/**
* Get the token password/secret
* @return the token password/secret
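For illustration only (not part of the patch): a sketch of decoding a token's identifier instead of printing the raw bytes; the caller and token kind are assumptions.

    import java.io.IOException;

    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class DecodeIdentifierSketch {
      static void describe(Token<? extends TokenIdentifier> token)
          throws IOException {
        TokenIdentifier id = token.decodeIdentifier();
        if (id != null) {
          System.out.println("kind=" + token.getKind() + " ident=" + id);
        } else {
          // No TokenIdentifier implementation is registered for this kind.
          System.out.println("kind=" + token.getKind() + " (opaque identifier)");
        }
      }
    }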
@@ -260,16 +305,31 @@ private static void addBinaryBuffer(StringBuilder buffer, byte[] bytes) {
buffer.append(num);
}
}
+
+ private void identifierToString(StringBuilder buffer) {
+ T id = null;
+ try {
+ id = decodeIdentifier();
+ } catch (IOException e) {
+ // handle in the finally block
+ } finally {
+ if (id != null) {
+ buffer.append("(").append(id).append(")");
+ } else {
+ addBinaryBuffer(buffer, identifier);
+ }
+ }
+ }
@Override
public String toString() {
StringBuilder buffer = new StringBuilder();
- buffer.append("Ident: ");
- addBinaryBuffer(buffer, identifier);
- buffer.append(", Kind: ");
+ buffer.append("Kind: ");
buffer.append(kind.toString());
buffer.append(", Service: ");
buffer.append(service.toString());
+ buffer.append(", Ident: ");
+ identifierToString(buffer);
return buffer.toString();
}
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java
index dd68c4d74b1..09a272317f3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionUtil.java
@@ -22,11 +22,20 @@
import org.apache.hadoop.classification.InterfaceAudience;
+import com.google.common.collect.ComparisonChain;
+
@InterfaceAudience.Private
public abstract class VersionUtil {
private static final Pattern COMPONENT_GROUPS = Pattern.compile("(\\d+)|(\\D+)");
+ /**
+ * Suffix added by maven for nightly builds and other snapshot releases.
+ * These releases are considered to precede the non-SNAPSHOT version
+ * with the same version number.
+ */
+ private static final String SNAPSHOT_SUFFIX = "-SNAPSHOT";
+
/**
* This function splits the two versions on "." and performs a
* naturally-ordered comparison of the resulting components. For example, the
@@ -48,6 +57,11 @@ public abstract class VersionUtil {
* between the two versions, then the version with fewer components is
* considered to precede the version with more components.
*
+ * In addition to the above rules, there is one special case: maven SNAPSHOT
+ * releases are considered to precede a non-SNAPSHOT release with an
+ * otherwise identical version number. For example, 2.0-SNAPSHOT precedes
+ * 2.0.
+ *
* This function returns a negative integer if version1 precedes version2, a
* positive integer if version2 precedes version1, and 0 if and only if the
* two versions' components are identical in value and cardinality.
@@ -61,6 +75,11 @@ public abstract class VersionUtil {
* versions are equal.
*/
public static int compareVersions(String version1, String version2) {
+ boolean isSnapshot1 = version1.endsWith(SNAPSHOT_SUFFIX);
+ boolean isSnapshot2 = version2.endsWith(SNAPSHOT_SUFFIX);
+ version1 = stripSnapshotSuffix(version1);
+ version2 = stripSnapshotSuffix(version2);
+
String[] version1Parts = version1.split("\\.");
String[] version2Parts = version2.split("\\.");
@@ -87,9 +106,21 @@ public static int compareVersions(String version1, String version2) {
return component1.length() - component2.length();
}
}
- return version1Parts.length - version2Parts.length;
+
+ return ComparisonChain.start()
+ .compare(version1Parts.length, version2Parts.length)
+ .compare(isSnapshot2, isSnapshot1)
+ .result();
}
+ private static String stripSnapshotSuffix(String version) {
+ if (version.endsWith(SNAPSHOT_SUFFIX)) {
+ return version.substring(0, version.length() - SNAPSHOT_SUFFIX.length());
+ } else {
+ return version;
+ }
+ }
+
private static boolean isNumeric(String s) {
try {
Integer.parseInt(s);
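For illustration only (not part of the patch): the ordering implied by the new SNAPSHOT handling, using made-up version strings.

    import org.apache.hadoop.util.VersionUtil;

    public class CompareVersionsSketch {
      public static void main(String[] args) {
        // Negative: the SNAPSHOT precedes the release with the same number.
        System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "2.0.0"));
        // Positive: it still follows an older release line.
        System.out.println(VersionUtil.compareVersions("2.0.0-SNAPSHOT", "1.0.4"));
        // Zero: identical versions compare equal.
        System.out.println(VersionUtil.compareVersions("2.0.0", "2.0.0"));
      }
    }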
diff --git a/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4 b/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
index 3e2c013866a..93e05b8148d 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
+++ b/hadoop-common-project/hadoop-common/src/main/native/acinclude.m4
@@ -1,4 +1,4 @@
-# AC_COMPUTE_NEEDED_DSO(LIBRARY, PREPROC_SYMBOL)
+# AC_COMPUTE_NEEDED_DSO(LIBRARY, TEST_PROGRAM, PREPROC_SYMBOL)
# --------------------------------------------------
# Compute the 'actual' dynamic-library used
# for LIBRARY and set it to PREPROC_SYMBOL
@@ -6,7 +6,7 @@ AC_DEFUN([AC_COMPUTE_NEEDED_DSO],
[
AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_libname_$1,
[
- echo 'int main(int argc, char **argv){return 0;}' > conftest.c
+ echo '$2' > conftest.c
if test -z "`${CC} ${LDFLAGS} -o conftest conftest.c -l$1 2>&1`"; then
dnl Try objdump and ldd in that order to get the dynamic library
if test ! -z "`which objdump | grep -v 'no objdump'`"; then
@@ -24,5 +24,5 @@ AC_CACHE_CHECK([Checking for the 'actual' dynamic-library for '-l$1'], ac_cv_lib
rm -f conftest*
]
)
-AC_DEFINE_UNQUOTED($2, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
+AC_DEFINE_UNQUOTED($3, ${ac_cv_libname_$1}, [The 'actual' dynamic-library for '-l$1'])
])# AC_COMPUTE_NEEDED_DSO
diff --git a/hadoop-common-project/hadoop-common/src/main/native/configure.ac b/hadoop-common-project/hadoop-common/src/main/native/configure.ac
index 4f9e63100e7..34408d64182 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/configure.ac
+++ b/hadoop-common-project/hadoop-common/src/main/native/configure.ac
@@ -87,10 +87,20 @@ CPPFLAGS=$cppflags_bak
AC_SUBST([JNI_CPPFLAGS])
dnl Check for zlib headers
-AC_CHECK_HEADERS([zlib.h zconf.h], AC_COMPUTE_NEEDED_DSO(z,HADOOP_ZLIB_LIBRARY), AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
+AC_CHECK_HEADERS([zlib.h zconf.h],
+ AC_COMPUTE_NEEDED_DSO(z,
+ [#include "zlib.h"
+ int main(int argc, char **argv){zlibVersion();return 0;}],
+ HADOOP_ZLIB_LIBRARY),
+ AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
dnl Check for snappy headers
-AC_CHECK_HEADERS([snappy-c.h], AC_COMPUTE_NEEDED_DSO(snappy,HADOOP_SNAPPY_LIBRARY), AC_MSG_WARN(Snappy headers were not found... building without snappy.))
+AC_CHECK_HEADERS([snappy-c.h],
+ AC_COMPUTE_NEEDED_DSO(snappy,
+ [#include "snappy-c.h"
+ int main(int argc, char **argv){snappy_compress(0,0,0,0);return 0;}],
+ HADOOP_SNAPPY_LIBRARY),
+ AC_MSG_WARN(Snappy headers were not found... building without snappy.))
dnl Check for headers needed by the native Group resolution implementation
AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
diff --git a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
index a6ddcc30358..6a92bb2b92a 100644
--- a/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
+++ b/hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c
@@ -70,7 +70,7 @@ Java_org_apache_hadoop_security_JniBasedUnixGroupsNetgroupMapping_getUsersForNet
// set the name of the group for subsequent calls to getnetgrent
// note that we want to end group lookup regardless of whether setnetgrent
- // was successfull or not (as long as it was called we need to call
+ // was successful or not (as long as it was called we need to call
// endnetgrent)
setnetgrentCalledFlag = 1;
if(setnetgrent(cgroup) == 1) {
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
index 178fa8162e8..d8c731ec5f2 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh
@@ -48,10 +48,10 @@ done
export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_NAMENODE_OPTS"
+export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_NAMENODE_OPTS"
export HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,RFAS $HADOOP_DATANODE_OPTS"
-export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=INFO,RFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"
+export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,RFAS} -Dhdfs.audit.logger=${HDFS_AUDIT_LOGGER:-INFO,NullAppender} $HADOOP_SECONDARYNAMENODE_OPTS"
# The ZKFC does not need a large heap, and keeping it small avoids
# any potential for long GC pauses
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
index ee91f9e73fb..81afa95bae1 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hdfs-site.xml
@@ -128,13 +128,6 @@
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds
-    </description>
-  </property>
-
  <property>
    <name>dfs.datanode.kerberos.principal</name>
    <value>dn/_HOST@${local.realm}</value>
diff --git a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
index 3470b3ef1b7..63e27cf72f8 100644
--- a/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/packages/templates/conf/log4j.properties
@@ -102,7 +102,7 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
#Security appender
#
-hadoop.security.logger=INFO,console
+hadoop.security.logger=INFO,NullAppender
hadoop.security.log.maxfilesize=256MB
hadoop.security.log.maxbackupindex=20
log4j.category.SecurityLogger=${hadoop.security.logger}
@@ -126,7 +126,7 @@ log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
#
# hdfs audit logging
#
-hdfs.audit.logger=INFO,console
+hdfs.audit.logger=INFO,NullAppender
hdfs.audit.log.maxfilesize=256MB
hdfs.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
@@ -141,7 +141,7 @@ log4j.appender.RFAAUDIT.MaxBackupIndex=${hdfs.audit.log.maxbackupindex}
#
# mapred audit logging
#
-mapred.audit.logger=INFO,console
+mapred.audit.logger=INFO,NullAppender
mapred.audit.log.maxfilesize=256MB
mapred.audit.log.maxbackupindex=20
log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
diff --git a/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto b/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
index 42dea3bde3e..50657413012 100644
--- a/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
+++ b/hadoop-common-project/hadoop-common/src/main/proto/RpcPayloadHeader.proto
@@ -19,7 +19,6 @@ option java_package = "org.apache.hadoop.ipc.protobuf";
option java_outer_classname = "RpcPayloadHeaderProtos";
option java_generate_equals_and_hash = true;
-
/**
* This is the rpc payload header. It is sent with every rpc call.
*
@@ -34,8 +33,6 @@ option java_generate_equals_and_hash = true;
*
*/
-
-
/**
* RpcKind determine the rpcEngine and the serialization of the rpc payload
*/
@@ -54,5 +51,27 @@ enum RpcPayloadOperationProto {
message RpcPayloadHeaderProto { // the header for the RpcRequest
optional RpcKindProto rpcKind = 1;
optional RpcPayloadOperationProto rpcOp = 2;
- optional uint32 callId = 3; // each rpc has a callId that is also used in response
+ required uint32 callId = 3; // each rpc has a callId that is also used in response
+}
+
+enum RpcStatusProto {
+ SUCCESS = 0; // RPC succeeded
+ ERROR = 1; // RPC Failed
+ FATAL = 2; // Fatal error - connection is closed
+}
+
+/**
+ * Rpc Response Header
+ * - If successful, the response follows after this header
+ *   - length (4 byte int), followed by the response
+ * - If error or fatal, the exception info follows
+ *   - length (4 byte int) Class name of exception - UTF-8 string
+ *   - length (4 byte int) Stacktrace - UTF-8 string
+ *   - if the strings are null then the length is -1
+ * In case of a fatal error the response contains the server-side IPC version
+ */
+message RpcResponseHeaderProto {
+ required uint32 callId = 1; // callId used in Request
+ required RpcStatusProto status = 2;
+ optional uint32 serverIpcVersionNum = 3; // in case of a fatal IPC error
}
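For illustration only (not part of the patch): a sketch of populating the new response header with the classes protoc generates from this file; the generated names are assumed to follow the java_package and java_outer_classname options declared above.

    import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcResponseHeaderProto;
    import org.apache.hadoop.ipc.protobuf.RpcPayloadHeaderProtos.RpcStatusProto;

    public class ResponseHeaderSketch {
      static RpcResponseHeaderProto successHeader(int callId) {
        return RpcResponseHeaderProto.newBuilder()
            .setCallId(callId)                 // echoes the callId from the request
            .setStatus(RpcStatusProto.SUCCESS) // serverIpcVersionNum only set on fatal errors
            .build();
      }
    }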
diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
index 7e442325bfd..e7e3c1b2652 100644
--- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
+++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml
@@ -62,6 +62,15 @@
Is service-level authorization enabled?
+
+<property>
+  <name>hadoop.security.instrumentation.requires.admin</name>
+  <value>false</value>
+  <description>
+    Indicates if administrator ACLs are required to access
+    instrumentation servlets (JMX, METRICS, CONF, STACKS).
+  </description>
+</property>
  <name>hadoop.security.authentication</name>
  <value>simple</value>
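For illustration only (not part of the patch): a sketch of enabling the new flag from code; the same effect can be had by setting the property in core-site.xml. The constant names follow the ones referenced in the TestHttpServer changes below.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class LockDownInstrumentationSketch {
      static Configuration secureConf() {
        Configuration conf = new Configuration();
        // Both flags are needed: authorization must be on for the admin ACL check.
        conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
        conf.setBoolean(
            CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
            true);
        return conf;
      }
    }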
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java
deleted file mode 100644
index fdc877c210b..00000000000
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestBlockLocation.java
+++ /dev/null
@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import java.io.ByteArrayInputStream;
-import java.io.DataInput;
-import java.io.DataInputStream;
-import java.io.IOException;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.io.DataOutputBuffer;
-
-public class TestBlockLocation extends TestCase {
- // Verify fix of bug identified in HADOOP-6004
- public void testDeserialization() throws IOException {
- // Create a test BlockLocation
- String[] names = {"one", "two" };
- String[] hosts = {"three", "four" };
- String[] topologyPaths = {"five", "six"};
- long offset = 25l;
- long length = 55l;
-
- BlockLocation bl = new BlockLocation(names, hosts, topologyPaths,
- offset, length);
-
- DataOutputBuffer dob = new DataOutputBuffer();
-
- // Serialize it
- try {
- bl.write(dob);
- } catch (IOException e) {
- fail("Unable to serialize data: " + e.getMessage());
- }
-
- byte[] bytes = dob.getData();
- DataInput da = new DataInputStream(new ByteArrayInputStream(bytes));
-
- // Try to re-create the BlockLocation the same way as is done during
- // deserialization
- BlockLocation bl2 = new BlockLocation();
-
- try {
- bl2.readFields(da);
- } catch (IOException e) {
- fail("Unable to deserialize BlockLocation: " + e.getMessage());
- }
-
- // Check that we got back what we started with
- verifyDeserialization(bl2.getHosts(), hosts);
- verifyDeserialization(bl2.getNames(), names);
- verifyDeserialization(bl2.getTopologyPaths(), topologyPaths);
- assertEquals(bl2.getOffset(), offset);
- assertEquals(bl2.getLength(), length);
- }
-
- private void verifyDeserialization(String[] ar1, String[] ar2) {
- assertEquals(ar1.length, ar2.length);
-
- for(int i = 0; i < ar1.length; i++)
- assertEquals(ar1[i], ar2[i]);
- }
-}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
index 6ccc201c55a..604ea78d0fb 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
@@ -18,11 +18,14 @@
package org.apache.hadoop.fs;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem.Statistics;
+
import static org.apache.hadoop.fs.FileSystemTestHelper.*;
import java.io.*;
import static org.junit.Assert.*;
+
import org.junit.Before;
import org.junit.Test;
@@ -233,4 +236,16 @@ public void testBasicDelete() throws IOException {
assertTrue("Did not delete file", fs.delete(file1));
assertTrue("Did not delete non-empty dir", fs.delete(dir1));
}
+
+ @Test
+ public void testStatistics() throws Exception {
+ FileSystem.getLocal(new Configuration());
+ int fileSchemeCount = 0;
+ for (Statistics stats : FileSystem.getAllStatistics()) {
+ if (stats.getScheme().equals("file")) {
+ fileSchemeCount++;
+ }
+ }
+ assertEquals(1, fileSchemeCount);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
index 1de434e3a96..d4740a41fc9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFileSystemBaseTest.java
@@ -71,11 +71,8 @@ public class ViewFileSystemBaseTest {
@Before
public void setUp() throws Exception {
- targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
- // In case previous test was killed before cleanup
- fsTarget.delete(targetTestRoot, true);
+ initializeTargetTestRoot();
- fsTarget.mkdirs(targetTestRoot);
// Make user and data dirs - we creates links to them in the mount table
fsTarget.mkdirs(new Path(targetTestRoot,"user"));
fsTarget.mkdirs(new Path(targetTestRoot,"data"));
@@ -99,7 +96,16 @@ public void tearDown() throws Exception {
fsTarget.delete(FileSystemTestHelper.getTestRootPath(fsTarget), true);
}
+ void initializeTargetTestRoot() throws IOException {
+ targetTestRoot = FileSystemTestHelper.getAbsoluteTestRootPath(fsTarget);
+ // In case previous test was killed before cleanup
+ fsTarget.delete(targetTestRoot, true);
+
+ fsTarget.mkdirs(targetTestRoot);
+ }
+
void setupMountPoints() {
+ ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
ConfigUtil.addLink(conf, "/user", new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/user2", new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/data", new Path(targetTestRoot,"data").toUri());
@@ -121,7 +127,7 @@ public void testGetMountPoints() {
}
int getExpectedMountPoints() {
- return 7;
+ return 8;
}
/**
@@ -166,7 +172,7 @@ public void testGetDelegationTokensWithCredentials() throws IOException {
}
}
}
- Assert.assertEquals(expectedTokenCount / 2, delTokens.size());
+ Assert.assertEquals((expectedTokenCount + 1) / 2, delTokens.size());
}
int getExpectedDelegationTokenCountWithCredentials() {
@@ -309,6 +315,16 @@ public void testOperationsThroughMountLinks() throws IOException {
Assert.assertTrue("Renamed dest should exist as dir in target",
fsTarget.isDirectory(new Path(targetTestRoot,"user/dirFooBar")));
+ // Make a directory under a directory that's mounted from the root of another FS
+ fsView.mkdirs(new Path("/targetRoot/dirFoo"));
+ Assert.assertTrue(fsView.exists(new Path("/targetRoot/dirFoo")));
+ boolean dirFooPresent = false;
+ for (FileStatus fileStatus : fsView.listStatus(new Path("/targetRoot/"))) {
+ if (fileStatus.getPath().getName().equals("dirFoo")) {
+ dirFooPresent = true;
+ }
+ }
+ Assert.assertTrue(dirFooPresent);
}
// rename across mount points that point to same target also fail
@@ -418,7 +434,7 @@ public void testListOnInternalDirsOfMountTable() throws IOException {
}
int getExpectedDirPaths() {
- return 6;
+ return 7;
}
@Test
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
index 8622f02ff6b..7f731de23e9 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/viewfs/ViewFsBaseTest.java
@@ -33,6 +33,7 @@
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.FileContextTestHelper;
+import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.FileContextTestHelper.fileType;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FsConstants;
@@ -77,12 +78,8 @@ public class ViewFsBaseTest {
@Before
public void setUp() throws Exception {
-
- targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
- // In case previous test was killed before cleanup
- fcTarget.delete(targetTestRoot, true);
+ initializeTargetTestRoot();
- fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
// Make user and data dirs - we creates links to them in the mount table
fcTarget.mkdir(new Path(targetTestRoot,"user"),
FileContext.DEFAULT_PERM, true);
@@ -100,6 +97,7 @@ public void setUp() throws Exception {
// Set up the defaultMT in the config with our mount point links
conf = new Configuration();
+ ConfigUtil.addLink(conf, "/targetRoot", targetTestRoot.toUri());
ConfigUtil.addLink(conf, "/user",
new Path(targetTestRoot,"user").toUri());
ConfigUtil.addLink(conf, "/user2",
@@ -118,6 +116,14 @@ public void setUp() throws Exception {
fcView = FileContext.getFileContext(FsConstants.VIEWFS_URI, conf);
// Also try viewfs://default/ - note authority is name of mount table
}
+
+ void initializeTargetTestRoot() throws IOException {
+ targetTestRoot = FileContextTestHelper.getAbsoluteTestRootPath(fcTarget);
+ // In case previous test was killed before cleanup
+ fcTarget.delete(targetTestRoot, true);
+
+ fcTarget.mkdir(targetTestRoot, FileContext.DEFAULT_PERM, true);
+ }
@After
public void tearDown() throws Exception {
@@ -128,7 +134,11 @@ public void tearDown() throws Exception {
public void testGetMountPoints() {
ViewFs viewfs = (ViewFs) fcView.getDefaultFileSystem();
MountPoint[] mountPoints = viewfs.getMountPoints();
- Assert.assertEquals(7, mountPoints.length);
+ Assert.assertEquals(8, mountPoints.length);
+ }
+
+ int getExpectedDelegationTokenCount() {
+ return 0;
}
/**
@@ -140,7 +150,7 @@ public void testGetMountPoints() {
public void testGetDelegationTokens() throws IOException {
List<Token<?>> delTokens =
fcView.getDelegationTokens(new Path("/"), "sanjay");
- Assert.assertEquals(0, delTokens.size());
+ Assert.assertEquals(getExpectedDelegationTokenCount(), delTokens.size());
}
@@ -281,6 +291,19 @@ public void testOperationsThroughMountLinks() throws IOException {
Assert.assertTrue("Renamed dest should exist as dir in target",
isDir(fcTarget,new Path(targetTestRoot,"user/dirFooBar")));
+ // Make a directory under a directory that's mounted from the root of another FS
+ fcView.mkdir(new Path("/targetRoot/dirFoo"), FileContext.DEFAULT_PERM, false);
+ Assert.assertTrue(exists(fcView, new Path("/targetRoot/dirFoo")));
+ boolean dirFooPresent = false;
+ RemoteIterator<FileStatus> dirContents = fcView.listStatus(new Path(
+ "/targetRoot/"));
+ while (dirContents.hasNext()) {
+ FileStatus fileStatus = dirContents.next();
+ if (fileStatus.getPath().getName().equals("dirFoo")) {
+ dirFooPresent = true;
+ }
+ }
+ Assert.assertTrue(dirFooPresent);
}
// rename across mount points that point to same target also fail
@@ -358,7 +381,7 @@ public void testListOnInternalDirsOfMountTable() throws IOException {
FileStatus[] dirPaths = fcView.util().listStatus(new Path("/"));
FileStatus fs;
- Assert.assertEquals(6, dirPaths.length);
+ Assert.assertEquals(7, dirPaths.length);
fs = FileContextTestHelper.containsPath(fcView, "/user", dirPaths);
Assert.assertNotNull(fs);
Assert.assertTrue("A mount should appear as symlink", fs.isSymlink());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
index b32d2a2d2cd..6dee7eb7134 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.http;
+import org.apache.hadoop.security.authorize.AccessControlList;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
@@ -70,6 +71,12 @@ public static HttpServer createTestServer(Configuration conf)
return createServer(TEST, conf);
}
+ public static HttpServer createTestServer(Configuration conf, AccessControlList adminsAcl)
+ throws IOException {
+ prepareTestWebapp();
+ return createServer(TEST, conf, adminsAcl);
+ }
+
/**
* Create but do not start the test webapp server. The test webapp dir is
* prepared/checked in advance.
@@ -132,6 +139,11 @@ public static HttpServer createServer(String webapp, Configuration conf)
throws IOException {
return new HttpServer(webapp, "0.0.0.0", 0, true, conf);
}
+
+ public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl)
+ throws IOException {
+ return new HttpServer(webapp, "0.0.0.0", 0, true, conf, adminsAcl);
+ }
/**
* Create an HttpServer instance for the given webapp
* @param webapp the webapp to work with
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
index bd9c230c50c..a4d5c5a9c4e 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java
@@ -60,7 +60,6 @@
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;
-import org.mockito.Mock;
import org.mockito.Mockito;
import org.mortbay.util.ajax.JSON;
@@ -360,6 +359,8 @@ public void testAuthorizationOfDefaultServlets() throws Exception {
Configuration conf = new Configuration();
conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
true);
+ conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN,
+ true);
conf.set(HttpServer.FILTER_INITIALIZER_PROPERTY,
DummyFilterInitializer.class.getName());
@@ -468,6 +469,26 @@ public void testHasAdministratorAccess() throws Exception {
}
+ @Test
+ public void testRequiresAuthorizationAccess() throws Exception {
+ Configuration conf = new Configuration();
+ ServletContext context = Mockito.mock(ServletContext.class);
+ Mockito.when(context.getAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE)).thenReturn(conf);
+ HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+ HttpServletResponse response = Mockito.mock(HttpServletResponse.class);
+
+ //requires admin access to instrumentation, FALSE by default
+ Assert.assertTrue(HttpServer.isInstrumentationAccessAllowed(context, request, response));
+
+ //requires admin access to instrumentation, TRUE
+ conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_INSTRUMENTATION_REQUIRES_ADMIN, true);
+ conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, true);
+ AccessControlList acls = Mockito.mock(AccessControlList.class);
+ Mockito.when(acls.isUserAllowed(Mockito.any())).thenReturn(false);
+ Mockito.when(context.getAttribute(HttpServer.ADMINS_ACL)).thenReturn(acls);
+ Assert.assertFalse(HttpServer.isInstrumentationAccessAllowed(context, request, response));
+ }
+
@Test public void testBindAddress() throws Exception {
checkBindAddress("0.0.0.0", 0, false).stop();
// hang onto this one for a bit more testing
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
index a756a57dae7..a86c532badc 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestText.java
@@ -20,6 +20,7 @@
import junit.framework.TestCase;
+import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.charset.CharacterCodingException;
import java.util.Random;
@@ -107,7 +108,6 @@ public void testCoding() throws Exception {
}
}
-
public void testIO() throws Exception {
DataOutputBuffer out = new DataOutputBuffer();
DataInputBuffer in = new DataInputBuffer();
@@ -136,6 +136,40 @@ public void testIO() throws Exception {
assertTrue(before.equals(after2));
}
}
+
+ public void doTestLimitedIO(String str, int strLen) throws IOException {
+ DataOutputBuffer out = new DataOutputBuffer();
+ DataInputBuffer in = new DataInputBuffer();
+
+ out.reset();
+ try {
+ Text.writeString(out, str, strLen);
+ fail("expected writeString to fail when told to write a string " +
+ "that was too long! The string was '" + str + "'");
+ } catch (IOException e) {
+ }
+ Text.writeString(out, str, strLen + 1);
+
+ // test that it reads correctly
+ in.reset(out.getData(), out.getLength());
+ in.mark(strLen);
+ String after;
+ try {
+ after = Text.readString(in, strLen);
+ fail("expected readString to fail when told to read a string " +
+ "that was too long! The string was '" + str + "'");
+ } catch (IOException e) {
+ }
+ in.reset();
+ after = Text.readString(in, strLen + 1);
+ assertTrue(str.equals(after));
+ }
+
+ public void testLimitedIO() throws Exception {
+ doTestLimitedIO("abcd", 4);
+ doTestLimitedIO("", 0);
+ doTestLimitedIO("1", 1);
+ }
public void testCompare() throws Exception {
DataOutputBuffer out1 = new DataOutputBuffer();
@@ -192,16 +226,6 @@ public void testFind() throws Exception {
assertTrue(text.find("\u20ac", 5)==11);
}
- public void testClear() {
- Text text = new Text();
- assertEquals("", text.toString());
- assertEquals(0, text.getBytes().length);
- text = new Text("abcd\u20acbdcd\u20ac");
- text.clear();
- assertEquals("", text.toString());
- assertEquals(0, text.getBytes().length);
- }
-
public void testFindAfterUpdatingContents() throws Exception {
Text text = new Text("abcd");
text.set("a".getBytes());
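A minimal sketch of the length-limited Text helpers that doTestLimitedIO above exercises, assuming the patched Text.writeString(DataOutput, String, int) and Text.readString(DataInput, int) overloads that reject data beyond the given byte limit:

    import java.io.IOException;

    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;

    public class LimitedTextIoSketch {
      public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        // Writing succeeds because the encoded string fits under the 16-byte cap.
        Text.writeString(out, "hello", 16);

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        // Reading enforces the same cap, so oversized or malformed input fails fast.
        System.out.println(Text.readString(in, 16));
      }
    }
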
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
index 56b2b2487ba..cc0c5c9f54c 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestRPC.java
@@ -322,6 +322,29 @@ public void testConfRpc() throws Exception {
server.stop();
}
+ @Test
+ public void testProxyAddress() throws Exception {
+ Server server = RPC.getServer(TestProtocol.class,
+ new TestImpl(), ADDRESS, 0, conf);
+ TestProtocol proxy = null;
+
+ try {
+ server.start();
+ InetSocketAddress addr = NetUtils.getConnectAddress(server);
+
+ // create a client
+ proxy = (TestProtocol)RPC.getProxy(
+ TestProtocol.class, TestProtocol.versionID, addr, conf);
+
+ assertEquals(addr, RPC.getServerAddress(proxy));
+ } finally {
+ server.stop();
+ if (proxy != null) {
+ RPC.stopProxy(proxy);
+ }
+ }
+ }
+
@Test
public void testSlowRpc() throws Exception {
System.out.println("Testing Slow RPC");
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
index f10323b8273..e8455862d79 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestNetUtils.java
@@ -25,11 +25,16 @@
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.NetworkInterface;
+import java.net.ServerSocket;
import java.net.Socket;
import java.net.SocketException;
+import java.net.SocketTimeoutException;
import java.net.URI;
import java.net.UnknownHostException;
+import java.util.Arrays;
import java.util.Enumeration;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
import junit.framework.AssertionFailedError;
@@ -37,7 +42,9 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.NetUtilsTestResolver;
+import org.junit.Assume;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
@@ -50,6 +57,13 @@ public class TestNetUtils {
private static final int LOCAL_PORT = 8080;
private static final String LOCAL_PORT_NAME = Integer.toString(LOCAL_PORT);
+ /**
+ * Some slop around expected times when making sure timeouts behave
+ * as expected. We assume that they will be accurate to within
+ * this threshold.
+ */
+ static final long TIME_FUDGE_MILLIS = 200;
+
/**
* Test that we can't accidentally connect back to the connecting socket due
* to a quirk in the TCP spec.
@@ -81,6 +95,79 @@ public void testAvoidLoopbackTcpSockets() throws Exception {
}
}
+ @Test
+ public void testSocketReadTimeoutWithChannel() throws Exception {
+ doSocketReadTimeoutTest(true);
+ }
+
+ @Test
+ public void testSocketReadTimeoutWithoutChannel() throws Exception {
+ doSocketReadTimeoutTest(false);
+ }
+
+
+ private void doSocketReadTimeoutTest(boolean withChannel)
+ throws IOException {
+ // Binding a ServerSocket is enough to accept connections.
+ // Rely on the backlog to accept for us.
+ ServerSocket ss = new ServerSocket(0);
+
+ Socket s;
+ if (withChannel) {
+ s = NetUtils.getDefaultSocketFactory(new Configuration())
+ .createSocket();
+ Assume.assumeNotNull(s.getChannel());
+ } else {
+ s = new Socket();
+ assertNull(s.getChannel());
+ }
+
+ SocketInputWrapper stm = null;
+ try {
+ NetUtils.connect(s, ss.getLocalSocketAddress(), 1000);
+
+ stm = NetUtils.getInputStream(s, 1000);
+ assertReadTimeout(stm, 1000);
+
+ // Change timeout, make sure it applies.
+ stm.setTimeout(1);
+ assertReadTimeout(stm, 1);
+
+ // If there is a channel, then setting the socket timeout
+ // should not matter. If there is not a channel, it will
+ // take effect.
+ s.setSoTimeout(1000);
+ if (withChannel) {
+ assertReadTimeout(stm, 1);
+ } else {
+ assertReadTimeout(stm, 1000);
+ }
+ } finally {
+ IOUtils.closeStream(stm);
+ IOUtils.closeSocket(s);
+ ss.close();
+ }
+ }
+
+ private void assertReadTimeout(SocketInputWrapper stm, int timeoutMillis)
+ throws IOException {
+ long st = System.nanoTime();
+ try {
+ stm.read();
+ fail("Didn't time out");
+ } catch (SocketTimeoutException ste) {
+ assertTimeSince(st, timeoutMillis);
+ }
+ }
+
+ private void assertTimeSince(long startNanos, int expectedMillis) {
+ long durationNano = System.nanoTime() - startNanos;
+ long millis = TimeUnit.MILLISECONDS.convert(
+ durationNano, TimeUnit.NANOSECONDS);
+ assertTrue("Expected " + expectedMillis + "ms, but took " + millis,
+ Math.abs(millis - expectedMillis) < TIME_FUDGE_MILLIS);
+ }
+
/**
* Test for {
* @throws UnknownHostException @link NetUtils#getLocalInetAddress(String)
@@ -512,6 +599,26 @@ public void testCanonicalUriWithNoPortNoDefaultPort() {
assertEquals("scheme://host.a.b/path", uri.toString());
}
+ /**
+ * Test for {@link NetUtils#normalizeHostNames}
+ */
+ @Test
+ public void testNormalizeHostName() {
+ List<String> hosts = Arrays.asList(new String[] {"127.0.0.1",
+ "localhost", "3w.org", "UnknownHost"});
+ List<String> normalizedHosts = NetUtils.normalizeHostNames(hosts);
+ // when an IP address is normalized, the same address is expected in return
+ assertEquals(normalizedHosts.get(0), hosts.get(0));
+ // when normalizing a resolvable hostname, its resolved IP address is expected in return
+ assertFalse(normalizedHosts.get(1).equals(hosts.get(1)));
+ assertEquals(normalizedHosts.get(1), hosts.get(0));
+ // HADOOP-8372: when normalizing a valid resolvable hostname that starts with a
+ // numeric character, its IP address is expected in return
+ assertFalse(normalizedHosts.get(2).equals(hosts.get(2)));
+ // an unresolvable hostname is returned unchanged after normalization.
+ assertEquals(normalizedHosts.get(3), hosts.get(3));
+ }
+
@Test
public void testGetHostNameOfIP() {
assertNull(NetUtils.getHostNameOfIP(null));
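A small sketch of the behavior the read-timeout tests above assert: NetUtils.getInputStream(Socket, long) hands back a SocketInputWrapper whose setTimeout(...) governs later reads on a channel-backed socket, independent of Socket#setSoTimeout. Names are taken from the tests; the timeouts below are illustrative only.

    import java.net.ServerSocket;
    import java.net.Socket;
    import java.net.SocketTimeoutException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.net.NetUtils;
    import org.apache.hadoop.net.SocketInputWrapper;

    public class ReadTimeoutSketch {
      public static void main(String[] args) throws Exception {
        // Bind a listener that never writes back, so every read must time out.
        ServerSocket server = new ServerSocket(0);
        Socket s = NetUtils.getDefaultSocketFactory(new Configuration()).createSocket();
        try {
          NetUtils.connect(s, server.getLocalSocketAddress(), 1000);
          SocketInputWrapper in = NetUtils.getInputStream(s, 1000); // initial 1s timeout
          in.setTimeout(200);                                       // tighten it afterwards
          try {
            in.read();                                              // no data -> times out
          } catch (SocketTimeoutException expected) {
            System.out.println("read timed out after the configured 200ms");
          }
        } finally {
          s.close();
          server.close();
        }
      }
    }
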
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
index 0c887eb82b8..5e3116e89ed 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSocketIOWithTimeout.java
@@ -19,6 +19,7 @@
import java.io.IOException;
import java.io.InputStream;
+import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.net.SocketTimeoutException;
import java.nio.channels.Pipe;
@@ -26,8 +27,13 @@
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.MultithreadedTestUtil;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestContext;
+import org.apache.hadoop.test.MultithreadedTestUtil.TestingThread;
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.*;
/**
* This tests timing out from SocketInputStream and
@@ -36,14 +42,17 @@
* Normal read and write using these streams are tested by pretty much
* every DFS unit test.
*/
-public class TestSocketIOWithTimeout extends TestCase {
+public class TestSocketIOWithTimeout {
static Log LOG = LogFactory.getLog(TestSocketIOWithTimeout.class);
private static int TIMEOUT = 1*1000;
private static String TEST_STRING = "1234567890";
+
+ private MultithreadedTestUtil.TestContext ctx = new TestContext();
- private void doIO(InputStream in, OutputStream out) throws IOException {
+ private void doIO(InputStream in, OutputStream out,
+ int expectedTimeout) throws IOException {
/* Keep on writing or reading until we get SocketTimeoutException.
* It expects this exception to occur within 100 millis of TIMEOUT.
*/
@@ -61,34 +70,15 @@ private void doIO(InputStream in, OutputStream out) throws IOException {
long diff = System.currentTimeMillis() - start;
LOG.info("Got SocketTimeoutException as expected after " +
diff + " millis : " + e.getMessage());
- assertTrue(Math.abs(TIMEOUT - diff) <= 200);
+ assertTrue(Math.abs(expectedTimeout - diff) <=
+ TestNetUtils.TIME_FUDGE_MILLIS);
break;
}
}
}
- /**
- * Just reads one byte from the input stream.
- */
- static class ReadRunnable implements Runnable {
- private InputStream in;
-
- public ReadRunnable(InputStream in) {
- this.in = in;
- }
- public void run() {
- try {
- in.read();
- } catch (IOException e) {
- LOG.info("Got expection while reading as expected : " +
- e.getMessage());
- return;
- }
- assertTrue(false);
- }
- }
-
- public void testSocketIOWithTimeout() throws IOException {
+ @Test
+ public void testSocketIOWithTimeout() throws Exception {
// first open pipe:
Pipe pipe = Pipe.open();
@@ -96,7 +86,7 @@ public void testSocketIOWithTimeout() throws IOException {
Pipe.SinkChannel sink = pipe.sink();
try {
- InputStream in = new SocketInputStream(source, TIMEOUT);
+ final InputStream in = new SocketInputStream(source, TIMEOUT);
OutputStream out = new SocketOutputStream(sink, TIMEOUT);
byte[] writeBytes = TEST_STRING.getBytes();
@@ -105,37 +95,62 @@ public void testSocketIOWithTimeout() throws IOException {
out.write(writeBytes);
out.write(byteWithHighBit);
- doIO(null, out);
+ doIO(null, out, TIMEOUT);
in.read(readBytes);
assertTrue(Arrays.equals(writeBytes, readBytes));
assertEquals(byteWithHighBit & 0xff, in.read());
- doIO(in, null);
+ doIO(in, null, TIMEOUT);
+
+ // Change timeout on the read side.
+ ((SocketInputStream)in).setTimeout(TIMEOUT * 2);
+ doIO(in, null, TIMEOUT * 2);
+
/*
* Verify that it handles interrupted threads properly.
- * Use a large timeout and expect the thread to return quickly.
+ * Use a large timeout and expect the thread to return quickly
+ * upon interruption.
*/
- in = new SocketInputStream(source, 0);
- Thread thread = new Thread(new ReadRunnable(in));
- thread.start();
-
- try {
- Thread.sleep(1000);
- } catch (InterruptedException ignored) {}
-
+ ((SocketInputStream)in).setTimeout(0);
+ TestingThread thread = new TestingThread(ctx) {
+ @Override
+ public void doWork() throws Exception {
+ try {
+ in.read();
+ fail("Did not fail with interrupt");
+ } catch (InterruptedIOException ste) {
+ LOG.info("Got expection while reading as expected : " +
+ ste.getMessage());
+ }
+ }
+ };
+ ctx.addThread(thread);
+ ctx.startThreads();
+ // If the thread is interrupted before it calls read()
+ // then it throws ClosedByInterruptException due to
+ // some Java quirk. Waiting for it to call read()
+ // gets it into select(), so we get the expected
+ // InterruptedIOException.
+ Thread.sleep(1000);
thread.interrupt();
-
- try {
- thread.join();
- } catch (InterruptedException e) {
- throw new IOException("Unexpected InterruptedException : " + e);
- }
-
+ ctx.stop();
+
//make sure the channels are still open
assertTrue(source.isOpen());
assertTrue(sink.isOpen());
-
+
+ // Nevertheless, the output stream is closed, because
+ // a partial write may have succeeded (see comment in
+ // SocketOutputStream#write(byte[], int, int))
+ try {
+ out.write(1);
+ fail("Did not throw");
+ } catch (IOException ioe) {
+ GenericTestUtils.assertExceptionContains(
+ "stream is closed", ioe);
+ }
+
out.close();
assertFalse(sink.isOpen());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
index f8b3c33340c..6356555da42 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestTableMapping.java
@@ -41,7 +41,7 @@ public class TestTableMapping {
public void setUp() throws IOException {
mappingFile = File.createTempFile(getClass().getSimpleName(), ".txt");
Files.write("a.b.c /rack1\n" +
- "1.2.3\t/rack2\n", mappingFile, Charsets.UTF_8);
+ "1.2.3.4\t/rack2\n", mappingFile, Charsets.UTF_8);
mappingFile.deleteOnExit();
}
@@ -55,7 +55,7 @@ public void testResolve() throws IOException {
List<String> names = new ArrayList<String>();
names.add("a.b.c");
- names.add("1.2.3");
+ names.add("1.2.3.4");
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
@@ -73,7 +73,7 @@ public void testTableCaching() throws IOException {
List<String> names = new ArrayList<String>();
names.add("a.b.c");
- names.add("1.2.3");
+ names.add("1.2.3.4");
List<String> result1 = mapping.resolve(names);
assertEquals(names.size(), result1.size());
@@ -96,7 +96,7 @@ public void testNoFile() {
List<String> names = new ArrayList<String>();
names.add("a.b.c");
- names.add("1.2.3");
+ names.add("1.2.3.4");
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
@@ -114,7 +114,7 @@ public void testFileDoesNotExist() {
List<String> names = new ArrayList<String>();
names.add("a.b.c");
- names.add("1.2.3");
+ names.add("1.2.3.4");
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
@@ -134,7 +134,7 @@ public void testBadFile() throws IOException {
List<String> names = new ArrayList<String>();
names.add("a.b.c");
- names.add("1.2.3");
+ names.add("1.2.3.4");
List<String> result = mapping.resolve(names);
assertEquals(names.size(), result.size());
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
index 54b75da23bf..6d7d695663b 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/TestToken.java
@@ -18,11 +18,15 @@
package org.apache.hadoop.security.token;
+import static junit.framework.Assert.assertEquals;
+
import java.io.*;
import java.util.Arrays;
import org.apache.hadoop.io.*;
import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenIdentifier;
+import org.apache.hadoop.security.token.delegation.TestDelegationToken.TestDelegationTokenSecretManager;
import junit.framework.TestCase;
@@ -94,5 +98,20 @@ public static void testEncodeWritable() throws Exception {
checkUrlSafe(encode);
}
}
+
+ public void testDecodeIdentifier() throws IOException {
+ TestDelegationTokenSecretManager secretManager =
+ new TestDelegationTokenSecretManager(0, 0, 0, 0);
+ secretManager.startThreads();
+ TestDelegationTokenIdentifier id = new TestDelegationTokenIdentifier(
+ new Text("owner"), new Text("renewer"), new Text("realUser"));
+
+ Token<TestDelegationTokenIdentifier> token =
+ new Token<TestDelegationTokenIdentifier>(id, secretManager);
+ TokenIdentifier idCopy = token.decodeIdentifier();
+
+ assertNotSame(id, idCopy);
+ assertEquals(id, idCopy);
+ }
}
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java
index a300cd25fb7..f01ae2f73d2 100644
--- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java
+++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestVersionUtil.java
@@ -19,7 +19,6 @@
import static org.junit.Assert.*;
-import org.apache.hadoop.test.GenericTestUtils;
import org.junit.Test;
public class TestVersionUtil {
@@ -30,6 +29,8 @@ public void testCompareVersions() {
assertEquals(0, VersionUtil.compareVersions("2.0.0", "2.0.0"));
assertEquals(0, VersionUtil.compareVersions("2.0.0a", "2.0.0a"));
assertEquals(0, VersionUtil.compareVersions("1", "1"));
+ assertEquals(0, VersionUtil.compareVersions(
+ "2.0.0-SNAPSHOT", "2.0.0-SNAPSHOT"));
// Assert that lower versions are lower, and higher versions are higher.
assertExpectedValues("1", "2.0.0");
@@ -52,6 +53,13 @@ public void testCompareVersions() {
assertExpectedValues("1.0.0a2", "1.0.0a10");
assertExpectedValues("1.0", "1.a");
assertExpectedValues("1.0", "1.a0");
+
+ // Snapshot builds precede their eventual releases.
+ assertExpectedValues("1.0-SNAPSHOT", "1.0");
+ assertExpectedValues("1.0", "1.0.0-SNAPSHOT");
+ assertExpectedValues("1.0.0-SNAPSHOT", "1.0.0");
+ assertExpectedValues("1.0.0", "1.0.1-SNAPSHOT");
+ assertExpectedValues("1.0.1-SNAPSHOT", "1.0.1");
}
private static void assertExpectedValues(String lower, String higher) {
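A brief illustration of the ordering the new assertions (and the 3.0.0-SNAPSHOT minimum-version defaults changed below) depend on; a negative result means the first argument is the lower version:

    import org.apache.hadoop.util.VersionUtil;

    public class SnapshotVersionSketch {
      public static void main(String[] args) {
        // A snapshot build sorts below the release it leads up to...
        System.out.println(VersionUtil.compareVersions("3.0.0-SNAPSHOT", "3.0.0") < 0); // true
        // ...but above any earlier released version.
        System.out.println(VersionUtil.compareVersions("3.0.0-SNAPSHOT", "2.0.0") > 0); // true
      }
    }
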
diff --git a/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 00000000000..891a67b61f4
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,2 @@
+org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier
+org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier
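The service file above registers the test identifiers so that Token#decodeIdentifier (exercised in TestToken.testDecodeIdentifier) can map a token's kind back to a concrete TokenIdentifier class. A hedged sketch of that lookup from the caller's side; the null fallback for unregistered kinds is an assumption:

    import java.io.IOException;

    import org.apache.hadoop.security.token.Token;
    import org.apache.hadoop.security.token.TokenIdentifier;

    public class DecodeIdentifierSketch {
      /** Prints the concrete identifier class resolved via META-INF/services. */
      static void describe(Token<? extends TokenIdentifier> token) throws IOException {
        TokenIdentifier id = token.decodeIdentifier(); // assumed null when the kind is unregistered
        System.out.println(id == null ? "unregistered token kind" : id.getClass().getName());
      }
    }
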
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1ee488d6df6..f9ee22a6c8b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -368,9 +368,6 @@ Release 2.0.0 - UNRELEASED
HDFS-2505. Add a test to verify getFileChecksum(..) with ViewFS. (Ravi
Prakash via szetszwo)
- HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
- and epoch in JournalProtocol. (suresh via szetszwo)
-
HDFS-3240. Drop log level of "heartbeat: ..." in BPServiceActor to DEBUG
(todd)
@@ -419,6 +416,44 @@ Release 2.0.0 - UNRELEASED
HDFS-3339. Change INode to package private. (John George via szetszwo)
+ HDFS-3303. Remove Writable implementation from RemoteEditLogManifest.
+ (Brandon Li via szetszwo)
+
+ HDFS-2617. Replaced Kerberized SSL for image transfer and fsck
+ with SPNEGO-based solution. (jghoman, tucu, and atm via eli)
+
+ HDFS-3365. Enable users to disable socket caching in DFS client
+ configuration (todd)
+
+ HDFS-3375. Put client name in DataXceiver thread name for readBlock
+ and keepalive (todd)
+
+ HDFS-3363. Define BlockCollection and MutableBlockCollection interfaces
+ so that INodeFile and INodeFileUnderConstruction do not have to be used in
+ block management. (John George via szetszwo)
+
+ HDFS-3390. DFSAdmin should print full stack traces of errors when DEBUG
+ logging is enabled. (atm)
+
+ HDFS-3341. Change minimum RPC versions to respective SNAPSHOTs instead of
+ final releases. (todd)
+
+ HDFS-3369. Rename {get|set|add}INode(..) methods in BlockManager and
+ BlocksMap to {get|set|add}BlockCollection(..). (John George via szetszwo)
+
+ HDFS-3134. harden edit log loader against malformed or malicious input.
+ (Colin Patrick McCabe via eli)
+
+ HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
+
+ HDFS-3401. Cleanup DatanodeDescriptor creation in the tests. (eli)
+
+ HDFS-3400. DNs should be able start with jsvc even if security is disabled.
+ (atm via eli)
+
+ HDFS-3211. Add fence(..) and replace NamenodeRegistration with JournalInfo
+ and epoch in JournalProtocol. (suresh via szetszwo)
+
OPTIMIZATIONS
HDFS-3024. Improve performance of stringification in addStoredBlock (todd)
@@ -432,6 +467,8 @@ Release 2.0.0 - UNRELEASED
HDFS-2476. More CPU efficient data structure for under-replicated,
over-replicated, and invalidated blocks. (Tomasz Nykiel via todd)
+ HDFS-3378. Remove DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY and DEFAULT. (eli)
+
BUG FIXES
HDFS-2481. Unknown protocol: org.apache.hadoop.hdfs.protocol.ClientProtocol.
@@ -589,6 +626,33 @@ Release 2.0.0 - UNRELEASED
HDFS-3330. If GetImageServlet throws an Error or RTE, response should not
have HTTP "OK" status. (todd)
+ HDFS-3351. NameNode#initializeGenericKeys should always set fs.defaultFS
+ regardless of whether HA or Federation is enabled. (atm)
+
+ HDFS-3359. DFSClient.close should close cached sockets. (todd)
+
+ HDFS-3350. In INode, add final to compareTo(..), equals(..) and hashCode(),
+ and remove synchronized from updatePermissionStatus(..). (szetszwo)
+
+ HDFS-3357. DataXceiver reads from client socket with incorrect/no timeout
+ (todd)
+
+ HDFS-3376. DFSClient fails to make connection to DN if there are many
+ unusable cached sockets (todd)
+
+ HDFS-3328. NPE in DataNode.getIpcPort. (eli)
+
+ HDFS-3396. FUSE build fails on Ubuntu 12.04. (Colin Patrick McCabe via eli)
+
+ HDFS-3395. NN doesn't start with HA+security enabled and HTTP address
+ set to 0.0.0.0. (atm)
+
+ HDFS-3385. The last block of INodeFileUnderConstruction is not
+ necessarily a BlockInfoUnderConstruction, so do not cast it in
+ FSNamesystem.recoverLeaseInternal(..). (szetszwo)
+
+ HDFS-3026. HA: Handle failure during HA state transition. (atm)
+
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am
index 85c81c226aa..706297f314e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/fuse-dfs/src/Makefile.am
@@ -18,4 +18,5 @@ bin_PROGRAMS = fuse_dfs
fuse_dfs_SOURCES = fuse_dfs.c fuse_options.c fuse_trash.c fuse_stat_struct.c fuse_users.c fuse_init.c fuse_connect.c fuse_impls_access.c fuse_impls_chmod.c fuse_impls_chown.c fuse_impls_create.c fuse_impls_flush.c fuse_impls_getattr.c fuse_impls_mkdir.c fuse_impls_mknod.c fuse_impls_open.c fuse_impls_read.c fuse_impls_release.c fuse_impls_readdir.c fuse_impls_rename.c fuse_impls_rmdir.c fuse_impls_statfs.c fuse_impls_symlink.c fuse_impls_truncate.c fuse_impls_utimens.c fuse_impls_unlink.c fuse_impls_write.c
AM_CFLAGS= -Wall -g
AM_CPPFLAGS= -DPERMS=$(PERMS) -D_FILE_OFFSET_BITS=64 -I$(JAVA_HOME)/include -I$(HADOOP_PREFIX)/../../src/main/native -I$(JAVA_HOME)/include/linux -D_FUSE_DFS_VERSION=\"$(PACKAGE_VERSION)\" -DPROTECTED_PATHS=\"$(PROTECTED_PATHS)\" -I$(FUSE_HOME)/include
-AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -lhdfs -L$(FUSE_HOME)/lib -lfuse -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server -ljvm -lm
+AM_LDFLAGS= -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib64 -L$(HADOOP_PREFIX)/../../target/native/target/usr/local/lib -L$(FUSE_HOME)/lib -L$(JAVA_HOME)/jre/lib/$(OS_ARCH)/server
+fuse_dfs_LDADD=-lfuse -lhdfs -ljvm -lm
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 6055ca7abf7..06b8b5acf39 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -57,16 +57,21 @@ shift
# Determine if we're starting a secure datanode, and if so, redefine appropriate variables
if [ "$COMMAND" == "datanode" ] && [ "$EUID" -eq 0 ] && [ -n "$HADOOP_SECURE_DN_USER" ]; then
- if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
- HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+ if [ -n "$JSVC_HOME" ]; then
+ if [ -n "$HADOOP_SECURE_DN_PID_DIR" ]; then
+ HADOOP_PID_DIR=$HADOOP_SECURE_DN_PID_DIR
+ fi
+
+ if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
+ HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
+ fi
+
+ HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
+ starting_secure_dn="true"
+ else
+ echo "It looks like you're trying to start a secure DN, but \$JSVC_HOME"\
+ "isn't set. Falling back to starting insecure DN."
fi
-
- if [ -n "$HADOOP_SECURE_DN_LOG_DIR" ]; then
- HADOOP_LOG_DIR=$HADOOP_SECURE_DN_LOG_DIR
- fi
-
- HADOOP_IDENT_STRING=$HADOOP_SECURE_DN_USER
- starting_secure_dn="true"
fi
if [ "$COMMAND" = "namenode" ] ; then
@@ -129,12 +134,12 @@ if [ "$starting_secure_dn" = "true" ]; then
if [ "$HADOOP_PID_DIR" = "" ]; then
HADOOP_SECURE_DN_PID="/tmp/hadoop_secure_dn.pid"
else
- HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
+ HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
fi
JSVC=$JSVC_HOME/jsvc
if [ ! -f $JSVC ]; then
- echo "JSVC_HOME is not set correctly so jsvc can not be found. Jsvc is required to run secure datanodes. "
+ echo "JSVC_HOME is not set correctly so jsvc cannot be found. Jsvc is required to run secure datanodes. "
echo "Please download and install jsvc from http://archive.apache.org/dist/commons/daemon/binaries/ "\
"and set JSVC_HOME to the directory containing the jsvc binary."
exit
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 43b1ba6fb84..969b0581282 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -560,6 +560,7 @@ void closeConnectionToNamenode() {
void abort() {
clientRunning = false;
closeAllFilesBeingWritten(true);
+ socketCache.clear();
closeConnectionToNamenode();
}
@@ -597,6 +598,7 @@ private void closeAllFilesBeingWritten(final boolean abort) {
public synchronized void close() throws IOException {
if(clientRunning) {
closeAllFilesBeingWritten(false);
+ socketCache.clear();
clientRunning = false;
leaserenewer.closeClient(this);
// close connections to the namenode
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 7e50f272cd1..493f48ec96c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -99,8 +99,6 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT = 0;
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY = "dfs.namenode.secondary.http-address";
public static final String DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_DEFAULT = "0.0.0.0:50090";
- public static final String DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY = "dfs.namenode.secondary.https-port";
- public static final int DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT = 50490;
public static final String DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY = "dfs.namenode.checkpoint.check.period";
public static final long DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_DEFAULT = 60;
public static final String DFS_NAMENODE_CHECKPOINT_PERIOD_KEY = "dfs.namenode.checkpoint.period";
@@ -147,7 +145,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_KEY = "dfs.namenode.num.extra.edits.retained";
public static final int DFS_NAMENODE_NUM_EXTRA_EDITS_RETAINED_DEFAULT = 1000000; //1M
public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_KEY = "dfs.namenode.min.supported.datanode.version";
- public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0";
+ public static final String DFS_NAMENODE_MIN_SUPPORTED_DATANODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
public static final String DFS_NAMENODE_EDITS_DIR_MINIMUM_KEY = "dfs.namenode.edits.dir.minimum";
public static final int DFS_NAMENODE_EDITS_DIR_MINIMUM_DEFAULT = 1;
@@ -265,7 +263,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final int DFS_DATANODE_IPC_DEFAULT_PORT = 50020;
public static final String DFS_DATANODE_IPC_ADDRESS_DEFAULT = "0.0.0.0" + DFS_DATANODE_IPC_DEFAULT_PORT;
public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_KEY = "dfs.datanode.min.supported.namenode.version";
- public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0";
+ public static final String DFS_DATANODE_MIN_SUPPORTED_NAMENODE_VERSION_DEFAULT = "3.0.0-SNAPSHOT";
public static final String DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY = "dfs.block.access.token.enable";
public static final boolean DFS_BLOCK_ACCESS_TOKEN_ENABLE_DEFAULT = false;
@@ -319,10 +317,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DATANODE_USER_NAME_KEY = "dfs.datanode.kerberos.principal";
public static final String DFS_NAMENODE_KEYTAB_FILE_KEY = "dfs.namenode.keytab.file";
public static final String DFS_NAMENODE_USER_NAME_KEY = "dfs.namenode.kerberos.principal";
- public static final String DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.namenode.kerberos.https.principal";
+ public static final String DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.namenode.kerberos.internal.spnego.principal";
public static final String DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY = "dfs.secondary.namenode.keytab.file";
public static final String DFS_SECONDARY_NAMENODE_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.principal";
- public static final String DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.https.principal";
+ public static final String DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY = "dfs.secondary.namenode.kerberos.internal.spnego.principal";
public static final String DFS_NAMENODE_NAME_CACHE_THRESHOLD_KEY = "dfs.namenode.name.cache.threshold";
public static final int DFS_NAMENODE_NAME_CACHE_THRESHOLD_DEFAULT = 10;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 8bbe4f37cb0..d5cd436c468 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -864,7 +864,13 @@ protected BlockReader getBlockReader(InetSocketAddress dnAddr,
// Allow retry since there is no way of knowing whether the cached socket
// is good until we actually use it.
for (int retries = 0; retries <= nCachedConnRetry && fromCache; ++retries) {
- Socket sock = socketCache.get(dnAddr);
+ Socket sock = null;
+ // Don't use the cache on the last attempt - it's possible that there
+ // are arbitrarily many unusable sockets in the cache, but we don't
+ // want to fail the read.
+ if (retries < nCachedConnRetry) {
+ sock = socketCache.get(dnAddr);
+ }
if (sock == null) {
fromCache = false;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 9e244a25315..dccc4e2ee0b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -714,8 +714,11 @@ public static String getInfoServer(
public static String substituteForWildcardAddress(String configuredAddress,
String defaultHost) throws IOException {
InetSocketAddress sockAddr = NetUtils.createSocketAddr(configuredAddress);
+ InetSocketAddress defaultSockAddr = NetUtils.createSocketAddr(defaultHost
+ + ":0");
if (sockAddr.getAddress().isAnyLocalAddress()) {
- if(UserGroupInformation.isSecurityEnabled()) {
+ if (UserGroupInformation.isSecurityEnabled() &&
+ defaultSockAddr.getAddress().isAnyLocalAddress()) {
throw new IOException("Cannot use a wildcard address with security. " +
"Must explicitly set bind address for Kerberos");
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
index 621dde03801..1454fdbd6f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HdfsConfiguration.java
@@ -81,7 +81,6 @@ private static void addDeprecatedKeys() {
deprecate("dfs.safemode.extension", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY);
deprecate("dfs.safemode.threshold.pct", DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY);
deprecate("dfs.secondary.http.address", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY);
- deprecate("dfs.secondary.https.port", DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY);
deprecate("dfs.socket.timeout", DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY);
deprecate("fs.checkpoint.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_DIR_KEY);
deprecate("fs.checkpoint.edits.dir", DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
index befa58c56ab..989fc123004 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
@@ -144,7 +144,7 @@ protected URI getNamenodeUri(URI uri) {
}
protected URI getNamenodeSecureUri(URI uri) {
- return DFSUtil.createUri("https", getNamenodeSecureAddr(uri));
+ return DFSUtil.createUri("http", getNamenodeSecureAddr(uri));
}
@Override
@@ -247,7 +247,7 @@ public Token<?> run() throws IOException {
c = DelegationTokenFetcher.getDTfromRemote(nnHttpUrl, renewer);
} catch (Exception e) {
LOG.info("Couldn't get a delegation token from " + nnHttpUrl +
- " using https.");
+ " using http.");
if(LOG.isDebugEnabled()) {
LOG.debug("error was ", e);
}
@@ -686,11 +686,11 @@ public long renew(Token<?> token,
Configuration conf) throws IOException {
// update the kerberos credentials, if they are coming from a keytab
UserGroupInformation.getLoginUser().reloginFromKeytab();
- // use https to renew the token
+ // use http to renew the token
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
return
DelegationTokenFetcher.renewDelegationToken
- (DFSUtil.createUri("https", serviceAddr).toString(),
+ (DFSUtil.createUri("http", serviceAddr).toString(),
(Token<DelegationTokenIdentifier>) token);
}
@@ -700,10 +700,10 @@ public void cancel(Token<?> token,
Configuration conf) throws IOException {
// update the kerberos credentials, if they are coming from a keytab
UserGroupInformation.getLoginUser().checkTGTAndReloginFromKeytab();
- // use https to cancel the token
+ // use http to cancel the token
InetSocketAddress serviceAddr = SecurityUtil.getTokenServiceAddr(token);
DelegationTokenFetcher.cancelDelegationToken
- (DFSUtil.createUri("https", serviceAddr).toString(),
+ (DFSUtil.createUri("http", serviceAddr).toString(),
(Token<DelegationTokenIdentifier>) token);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
index 0713de8ca8f..fe4dc55c8d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -46,7 +46,7 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.hdfs.util.DirectBufferPool;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.SocketInputStream;
+import org.apache.hadoop.net.SocketInputWrapper;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
@@ -450,11 +450,8 @@ public static BlockReader newBlockReader( Socket sock, String file,
//
// Get bytes in block, set streams
//
- Preconditions.checkArgument(sock.getChannel() != null,
- "Socket %s does not have an associated Channel.",
- sock);
- SocketInputStream sin =
- (SocketInputStream)NetUtils.getInputStream(sock);
+ SocketInputWrapper sin = NetUtils.getInputStream(sock);
+ ReadableByteChannel ch = sin.getReadableByteChannel();
DataInputStream in = new DataInputStream(sin);
BlockOpResponseProto status = BlockOpResponseProto.parseFrom(
@@ -477,7 +474,7 @@ public static BlockReader newBlockReader( Socket sock, String file,
}
return new RemoteBlockReader2(file, block.getBlockPoolId(), block.getBlockId(),
- sin, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
+ ch, checksum, verifyChecksum, startOffset, firstChunkOffset, len, sock);
}
static void checkSuccess(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
index 508ec61ca28..36b78834ede 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
@@ -47,6 +47,9 @@ class SocketCache {
public SocketCache(int capacity) {
multimap = LinkedListMultimap.create();
this.capacity = capacity;
+ if (capacity <= 0) {
+ LOG.debug("SocketCache disabled in configuration.");
+ }
}
/**
@@ -55,6 +58,10 @@ public SocketCache(int capacity) {
* @return A socket with unknown state, possibly closed underneath. Or null.
*/
public synchronized Socket get(SocketAddress remote) {
+ if (capacity <= 0) { // disabled
+ return null;
+ }
+
List<Socket> socklist = multimap.get(remote);
if (socklist == null) {
return null;
@@ -76,6 +83,12 @@ public synchronized Socket get(SocketAddress remote) {
* @param sock socket not used by anyone.
*/
public synchronized void put(Socket sock) {
+ if (capacity <= 0) {
+ // Cache disabled.
+ IOUtils.closeSocket(sock);
+ return;
+ }
+
Preconditions.checkNotNull(sock);
SocketAddress remoteAddr = sock.getRemoteSocketAddress();
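A minimal sketch of the disable path added above, written as if it lived in the org.apache.hadoop.hdfs package since SocketCache is package-private: with a non-positive capacity the cache returns nothing from get() and immediately closes anything handed to put().

    package org.apache.hadoop.hdfs;

    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class SocketCacheDisableSketch {
      public static void main(String[] args) {
        SocketCache cache = new SocketCache(0);      // capacity <= 0 disables caching
        cache.put(new Socket());                     // closed right away instead of cached
        // get() short-circuits to null, so callers fall back to opening a new socket.
        System.out.println(cache.get(new InetSocketAddress("127.0.0.1", 50010)));
      }
    }
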
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
index c1fd3f9f826..62f2d762379 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
@@ -148,7 +148,8 @@ public void readFields(DataInput in) throws IOException {
userId = WritableUtils.readString(in);
blockPoolId = WritableUtils.readString(in);
blockId = WritableUtils.readVLong(in);
- int length = WritableUtils.readVInt(in);
+ int length = WritableUtils.readVIntInRange(in, 0,
+ AccessMode.class.getEnumConstants().length);
for (int i = 0; i < length; i++) {
modes.add(WritableUtils.readEnum(in, AccessMode.class));
}
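A short example of the bounded read now used for the access-mode count, assuming WritableUtils.readVIntInRange(DataInput, lower, upper) rejects out-of-range values with an IOException rather than letting a corrupt length drive the loop:

    import java.io.IOException;

    import org.apache.hadoop.io.DataInputBuffer;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.WritableUtils;

    public class BoundedVIntSketch {
      public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        WritableUtils.writeVInt(out, 1000);          // a length no valid token would carry

        DataInputBuffer in = new DataInputBuffer();
        in.reset(out.getData(), out.getLength());
        try {
          WritableUtils.readVIntInRange(in, 0, 4);   // bound mirrors the AccessMode enum size
          System.out.println("unexpectedly accepted");
        } catch (IOException rejected) {
          System.out.println("rejected out-of-range length: " + rejected.getMessage());
        }
      }
    }
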
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
new file mode 100644
index 00000000000..f7c33cad011
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -0,0 +1,63 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.fs.ContentSummary;
+
+/**
+ * This interface is used by the block manager to expose a
+ * few characteristics of a collection of Block/BlockUnderConstruction.
+ */
+public interface BlockCollection {
+ /**
+ * Get the last block of the collection.
+ * Make sure it has the right type.
+ */
+ public <T extends BlockInfo> T getLastBlock() throws IOException;
+
+ /**
+ * Get content summary.
+ */
+ public ContentSummary computeContentSummary();
+
+ /** @return the number of blocks */
+ public int numBlocks();
+
+ public BlockInfo[] getBlocks();
+ /**
+ * Get preferred block size for the collection
+ * @return preferred block size in bytes
+ */
+ public long getPreferredBlockSize();
+
+ /**
+ * Get block replication for the collection
+ * @return block replication value
+ */
+ public short getReplication();
+
+ /**
+ * Get name of collection.
+ */
+ public String getName();
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
index ce3ff8b3ed3..be86b536c3b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
@@ -22,18 +22,17 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.LightWeightGSet;
/**
* BlockInfo class maintains for a given block
- * the {@link INodeFile} it is part of and datanodes where the replicas of
+ * the {@link BlockCollection} it is part of and datanodes where the replicas of
* the block are stored.
*/
@InterfaceAudience.Private
public class BlockInfo extends Block implements
LightWeightGSet.LinkedElement {
- private INodeFile inode;
+ private BlockCollection bc;
/** For implementing {@link LightWeightGSet.LinkedElement} interface */
private LightWeightGSet.LinkedElement nextLinkedElement;
@@ -58,13 +57,13 @@ public class BlockInfo extends Block implements
*/
public BlockInfo(int replication) {
this.triplets = new Object[3*replication];
- this.inode = null;
+ this.bc = null;
}
public BlockInfo(Block blk, int replication) {
super(blk);
this.triplets = new Object[3*replication];
- this.inode = null;
+ this.bc = null;
}
/**
@@ -73,16 +72,16 @@ public BlockInfo(Block blk, int replication) {
* @param from BlockInfo to copy from.
*/
protected BlockInfo(BlockInfo from) {
- this(from, from.inode.getReplication());
- this.inode = from.inode;
+ this(from, from.bc.getReplication());
+ this.bc = from.bc;
}
- public INodeFile getINode() {
- return inode;
+ public BlockCollection getBlockCollection() {
+ return bc;
}
- public void setINode(INodeFile inode) {
- this.inode = inode;
+ public void setBlockCollection(BlockCollection bc) {
+ this.bc = bc;
}
DatanodeDescriptor getDatanode(int index) {
@@ -335,7 +334,7 @@ public BlockInfoUnderConstruction convertToBlockUnderConstruction(
BlockUCState s, DatanodeDescriptor[] targets) {
if(isComplete()) {
return new BlockInfoUnderConstruction(
- this, getINode().getReplication(), s, targets);
+ this, getBlockCollection().getReplication(), s, targets);
}
// the block is already under construction
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)this;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
index 6509f3d7fa9..5c0db1bb9b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoUnderConstruction.java
@@ -234,7 +234,7 @@ public void initializeBlockRecovery(long recoveryId) {
blockRecoveryId = recoveryId;
if (replicas.size() == 0) {
NameNode.stateChangeLog.warn("BLOCK*"
- + " INodeFileUnderConstruction.initLeaseRecovery:"
+ + " BlockInfoUnderConstruction.initLeaseRecovery:"
+ " No blocks found, lease removed.");
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a8121064139..1568e23ac51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -55,8 +55,6 @@
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
-import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.Namesystem;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
@@ -142,7 +140,7 @@ public int getPendingDataNodeMessageCount() {
private final long replicationRecheckInterval;
/**
- * Mapping: Block -> { INode, datanodes, self ref }
+ * Mapping: Block -> { BlockCollection, datanodes, self ref }
* Updated only in response to client-sent information.
*/
final BlocksMap blocksMap;
@@ -192,7 +190,7 @@ public int getPendingDataNodeMessageCount() {
public final short minReplication;
/** Default number of replicas */
public final int defaultReplication;
- /** The maximum number of entries returned by getCorruptInodes() */
+ /** Maximum number of corrupt files to return in a single call */
final int maxCorruptFilesReturned;
/** variable to enable check for enough racks */
@@ -384,7 +382,7 @@ private void dumpBlockMeta(Block block, PrintWriter out) {
numReplicas.decommissionedReplicas();
if (block instanceof BlockInfo) {
- String fileName = ((BlockInfo)block).getINode().getFullPathName();
+ String fileName = ((BlockInfo)block).getBlockCollection().getName();
out.print(fileName + ": ");
}
// l: == live:, d: == decommissioned c: == corrupt e: == excess
@@ -454,17 +452,17 @@ private boolean commitBlock(final BlockInfoUnderConstruction block,
* Commit the last block of the file and mark it as complete if it
* meets the minimum replication requirement
*
- * @param fileINode file inode
+ * @param bc block collection
* @param commitBlock - contains client reported block length and generation
* @return true if the last block is changed to committed state.
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- public boolean commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode,
+ public boolean commitOrCompleteLastBlock(MutableBlockCollection bc,
Block commitBlock) throws IOException {
if(commitBlock == null)
return false; // not committing, this is a block allocation retry
- BlockInfo lastBlock = fileINode.getLastBlock();
+ BlockInfo lastBlock = bc.getLastBlock();
if(lastBlock == null)
return false; // no blocks in file yet
if(lastBlock.isComplete())
@@ -472,22 +470,22 @@ public boolean commitOrCompleteLastBlock(INodeFileUnderConstruction fileINode,
final boolean b = commitBlock((BlockInfoUnderConstruction)lastBlock, commitBlock);
if(countNodes(lastBlock).liveReplicas() >= minReplication)
- completeBlock(fileINode,fileINode.numBlocks()-1, false);
+ completeBlock(bc, bc.numBlocks()-1, false);
return b;
}
/**
* Convert a specified block of the file to a complete block.
- * @param fileINode file
+ * @param bc file
* @param blkIndex block index in the file
* @throws IOException if the block does not have at least a minimal number
* of replicas reported from data-nodes.
*/
- private BlockInfo completeBlock(final INodeFile fileINode,
+ private BlockInfo completeBlock(final MutableBlockCollection bc,
final int blkIndex, boolean force) throws IOException {
if(blkIndex < 0)
return null;
- BlockInfo curBlock = fileINode.getBlocks()[blkIndex];
+ BlockInfo curBlock = bc.getBlocks()[blkIndex];
if(curBlock.isComplete())
return curBlock;
BlockInfoUnderConstruction ucBlock = (BlockInfoUnderConstruction)curBlock;
@@ -500,7 +498,7 @@ private BlockInfo completeBlock(final INodeFile fileINode,
"Cannot complete block: block has not been COMMITTED by the client");
BlockInfo completeBlock = ucBlock.convertToCompleteBlock();
// replace penultimate block in file
- fileINode.setBlock(blkIndex, completeBlock);
+ bc.setBlock(blkIndex, completeBlock);
// Since safe-mode only counts complete blocks, and we now have
// one more complete block, we need to adjust the total up, and
@@ -516,12 +514,12 @@ private BlockInfo completeBlock(final INodeFile fileINode,
return blocksMap.replaceBlock(completeBlock);
}
- private BlockInfo completeBlock(final INodeFile fileINode,
+ private BlockInfo completeBlock(final MutableBlockCollection bc,
final BlockInfo block, boolean force) throws IOException {
- BlockInfo[] fileBlocks = fileINode.getBlocks();
+ BlockInfo[] fileBlocks = bc.getBlocks();
for(int idx = 0; idx < fileBlocks.length; idx++)
if(fileBlocks[idx] == block) {
- return completeBlock(fileINode, idx, force);
+ return completeBlock(bc, idx, force);
}
return block;
}
@@ -531,10 +529,10 @@ private BlockInfo completeBlock(final INodeFile fileINode,
* regardless of whether enough replicas are present. This is necessary
* when tailing edit logs as a Standby.
*/
- public BlockInfo forceCompleteBlock(final INodeFile fileINode,
+ public BlockInfo forceCompleteBlock(final MutableBlockCollection bc,
final BlockInfoUnderConstruction block) throws IOException {
block.commitBlock(block);
- return completeBlock(fileINode, block, true);
+ return completeBlock(bc, block, true);
}
@@ -548,14 +546,14 @@ public BlockInfo forceCompleteBlock(final INodeFile fileINode,
* The methods returns null if there is no partial block at the end.
* The client is supposed to allocate a new block with the next call.
*
- * @param fileINode file
+ * @param bc file
* @return the last block locations if the block is partial or null otherwise
*/
public LocatedBlock convertLastBlockToUnderConstruction(
- INodeFileUnderConstruction fileINode) throws IOException {
- BlockInfo oldBlock = fileINode.getLastBlock();
+ MutableBlockCollection bc) throws IOException {
+ BlockInfo oldBlock = bc.getLastBlock();
if(oldBlock == null ||
- fileINode.getPreferredBlockSize() == oldBlock.getNumBytes())
+ bc.getPreferredBlockSize() == oldBlock.getNumBytes())
return null;
assert oldBlock == getStoredBlock(oldBlock) :
"last block of the file is not in blocksMap";
@@ -563,7 +561,7 @@ public LocatedBlock convertLastBlockToUnderConstruction(
DatanodeDescriptor[] targets = getNodes(oldBlock);
BlockInfoUnderConstruction ucBlock =
- fileINode.setLastBlock(oldBlock, targets);
+ bc.setLastBlock(oldBlock, targets);
blocksMap.replaceBlock(ucBlock);
// Remove block from replication queue.
@@ -583,7 +581,7 @@ public LocatedBlock convertLastBlockToUnderConstruction(
// always decrement total blocks
-1);
- final long fileLength = fileINode.computeContentSummary().getLength();
+ final long fileLength = bc.computeContentSummary().getLength();
final long pos = fileLength - ucBlock.getNumBytes();
return createLocatedBlock(ucBlock, pos, AccessMode.WRITE);
}
@@ -923,8 +921,8 @@ private void markBlockAsCorrupt(BlockInfo storedBlock,
" does not exist. ");
}
- INodeFile inode = storedBlock.getINode();
- if (inode == null) {
+ BlockCollection bc = storedBlock.getBlockCollection();
+ if (bc == null) {
NameNode.stateChangeLog.info("BLOCK markBlockAsCorrupt: " +
"block " + storedBlock +
" could not be marked as corrupt as it" +
@@ -938,7 +936,7 @@ private void markBlockAsCorrupt(BlockInfo storedBlock,
// Add this replica to corruptReplicas Map
corruptReplicas.addToCorruptReplicasMap(storedBlock, node, reason);
- if (countNodes(storedBlock).liveReplicas() >= inode.getReplication()) {
+ if (countNodes(storedBlock).liveReplicas() >= bc.getReplication()) {
// the block is over-replicated so invalidate the replicas immediately
invalidateBlock(storedBlock, node);
} else if (namesystem.isPopulatingReplQueues()) {
@@ -1051,7 +1049,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes, liveReplicaNodes;
DatanodeDescriptor srcNode;
- INodeFile fileINode = null;
+ BlockCollection bc = null;
int additionalReplRequired;
int scheduledWork = 0;
@@ -1063,15 +1061,15 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
for (Block block : blocksToReplicate.get(priority)) {
// block should belong to a file
- fileINode = blocksMap.getINode(block);
+ bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append
- if(fileINode == null || fileINode.isUnderConstruction()) {
+ if(bc == null || bc instanceof MutableBlockCollection) {
neededReplications.remove(block, priority); // remove from neededReplications
neededReplications.decrementReplicationIndex(priority);
continue;
}
- requiredReplication = fileINode.getReplication();
+ requiredReplication = bc.getReplication();
// get a source data-node
containingNodes = new ArrayList<DatanodeDescriptor>();
@@ -1107,7 +1105,7 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
} else {
additionalReplRequired = 1; // Needed on a new rack
}
- work.add(new ReplicationWork(block, fileINode, srcNode,
+ work.add(new ReplicationWork(block, bc, srcNode,
containingNodes, liveReplicaNodes, additionalReplRequired,
priority));
}
@@ -1129,8 +1127,8 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
// choose replication targets: NOT HOLDING THE GLOBAL LOCK
// It is costly to extract the filename for which chooseTargets is called,
- // so for now we pass in the Inode itself.
- rw.targets = blockplacement.chooseTarget(rw.fileINode,
+ // so for now we pass in the block collection itself.
+ rw.targets = blockplacement.chooseTarget(rw.bc,
rw.additionalReplRequired, rw.srcNode, rw.liveReplicaNodes,
excludedNodes, rw.block.getNumBytes());
}
@@ -1149,15 +1147,15 @@ int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
int priority = rw.priority;
// Recheck since global lock was released
// block should belong to a file
- fileINode = blocksMap.getINode(block);
+ bc = blocksMap.getBlockCollection(block);
// abandoned block or block reopened for append
- if(fileINode == null || fileINode.isUnderConstruction()) {
+ if(bc == null || bc instanceof MutableBlockCollection) {
neededReplications.remove(block, priority); // remove from neededReplications
rw.targets = null;
neededReplications.decrementReplicationIndex(priority);
continue;
}
- requiredReplication = fileINode.getReplication();
+ requiredReplication = bc.getReplication();
// do not schedule more if enough replicas is already pending
NumberReplicas numReplicas = countNodes(block);
@@ -1916,7 +1914,7 @@ private void addStoredBlockImmediate(BlockInfo storedBlock,
int numCurrentReplica = countLiveNodes(storedBlock);
if (storedBlock.getBlockUCState() == BlockUCState.COMMITTED
&& numCurrentReplica >= minReplication) {
- completeBlock(storedBlock.getINode(), storedBlock, false);
+ completeBlock((MutableBlockCollection)storedBlock.getBlockCollection(), storedBlock, false);
} else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that.
@@ -1944,7 +1942,7 @@ private Block addStoredBlock(final BlockInfo block,
} else {
storedBlock = block;
}
- if (storedBlock == null || storedBlock.getINode() == null) {
+ if (storedBlock == null || storedBlock.getBlockCollection() == null) {
// If this block does not belong to anyfile, then we are done.
NameNode.stateChangeLog.info("BLOCK* addStoredBlock: " + block + " on "
+ node + " size " + block.getNumBytes()
@@ -1954,8 +1952,8 @@ private Block addStoredBlock(final BlockInfo block,
return block;
}
assert storedBlock != null : "Block must be stored by now";
- INodeFile fileINode = storedBlock.getINode();
- assert fileINode != null : "Block must belong to a file";
+ BlockCollection bc = storedBlock.getBlockCollection();
+ assert bc != null : "Block must belong to a file";
// add block to the datanode
boolean added = node.addBlock(storedBlock);
@@ -1981,7 +1979,7 @@ private Block addStoredBlock(final BlockInfo block,
if(storedBlock.getBlockUCState() == BlockUCState.COMMITTED &&
numLiveReplicas >= minReplication) {
- storedBlock = completeBlock(fileINode, storedBlock, false);
+ storedBlock = completeBlock((MutableBlockCollection)bc, storedBlock, false);
} else if (storedBlock.isComplete()) {
// check whether safe replication is reached for the block
// only complete blocks are counted towards that
@@ -1992,7 +1990,7 @@ private Block addStoredBlock(final BlockInfo block,
}
// if file is under construction, then done for now
- if (fileINode.isUnderConstruction()) {
+ if (bc instanceof MutableBlockCollection) {
return storedBlock;
}
@@ -2002,7 +2000,7 @@ private Block addStoredBlock(final BlockInfo block,
}
// handle underReplication/overReplication
- short fileReplication = fileINode.getReplication();
+ short fileReplication = bc.getReplication();
if (!isNeededReplication(storedBlock, fileReplication, numCurrentReplica)) {
neededReplications.remove(storedBlock, numCurrentReplica,
num.decommissionedReplicas(), fileReplication);
@@ -2129,8 +2127,8 @@ public void processMisReplicatedBlocks() {
* what happened with it.
*/
private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
- INodeFile fileINode = block.getINode();
- if (fileINode == null) {
+ BlockCollection bc = block.getBlockCollection();
+ if (bc == null) {
// block does not belong to any file
addToInvalidates(block);
return MisReplicationResult.INVALID;
@@ -2141,7 +2139,7 @@ private MisReplicationResult processMisReplicatedBlock(BlockInfo block) {
return MisReplicationResult.UNDER_CONSTRUCTION;
}
// calculate current replication
- short expectedReplication = fileINode.getReplication();
+ short expectedReplication = bc.getReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
// add to under-replicated queue if need to be
@@ -2258,7 +2256,7 @@ private void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
BlockPlacementPolicy replicator) {
assert namesystem.hasWriteLock();
// first form a rack to datanodes map and
- INodeFile inode = getINode(b);
+ BlockCollection bc = getBlockCollection(b);
final Map<String, List<DatanodeDescriptor>> rackMap
= new HashMap<String, List<DatanodeDescriptor>>();
for(final Iterator<DatanodeDescriptor> iter = nonExcess.iterator();
@@ -2298,7 +2296,7 @@ private void chooseExcessReplicates(Collection<DatanodeDescriptor> nonExcess,
|| (addedNode != null && !priSet.contains(addedNode))) ) {
cur = delNodeHint;
} else { // regular excessive replica removal
- cur = replicator.chooseReplicaToDelete(inode, b, replication,
+ cur = replicator.chooseReplicaToDelete(bc, b, replication,
priSet, remains);
}
firstOne = false;
@@ -2379,8 +2377,8 @@ public void removeStoredBlock(Block block, DatanodeDescriptor node) {
// necessary. In that case, put block on a possibly-will-
// be-replicated list.
//
- INodeFile fileINode = blocksMap.getINode(block);
- if (fileINode != null) {
+ BlockCollection bc = blocksMap.getBlockCollection(block);
+ if (bc != null) {
namesystem.decrementSafeBlockCount(block);
updateNeededReplications(block, -1, 0);
}
@@ -2611,7 +2609,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
NumberReplicas num) {
int curReplicas = num.liveReplicas();
int curExpectedReplicas = getReplication(block);
- INodeFile fileINode = blocksMap.getINode(block);
+ BlockCollection bc = blocksMap.getBlockCollection(block);
Iterator<DatanodeDescriptor> nodeIter = blocksMap.nodeIterator(block);
StringBuilder nodeList = new StringBuilder();
while (nodeIter.hasNext()) {
@@ -2624,7 +2622,7 @@ private void logBlockReplicationInfo(Block block, DatanodeDescriptor srcNode,
+ ", corrupt replicas: " + num.corruptReplicas()
+ ", decommissioned replicas: " + num.decommissionedReplicas()
+ ", excess replicas: " + num.excessReplicas()
- + ", Is Open File: " + fileINode.isUnderConstruction()
+ + ", Is Open File: " + (bc instanceof MutableBlockCollection)
+ ", Datanodes having this block: " + nodeList + ", Current Datanode: "
+ srcNode + ", Is current datanode decommissioning: "
+ srcNode.isDecommissionInProgress());
@@ -2639,8 +2637,8 @@ void processOverReplicatedBlocksOnReCommission(
final Iterator<? extends Block> it = srcNode.getBlockIterator();
while(it.hasNext()) {
final Block block = it.next();
- INodeFile fileINode = blocksMap.getINode(block);
- short expectedReplication = fileINode.getReplication();
+ BlockCollection bc = blocksMap.getBlockCollection(block);
+ short expectedReplication = bc.getReplication();
NumberReplicas num = countNodes(block);
int numCurrentReplica = num.liveReplicas();
if (numCurrentReplica > expectedReplication) {
@@ -2662,9 +2660,9 @@ boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
final Iterator<? extends Block> it = srcNode.getBlockIterator();
while(it.hasNext()) {
final Block block = it.next();
- INodeFile fileINode = blocksMap.getINode(block);
+ BlockCollection bc = blocksMap.getBlockCollection(block);
- if (fileINode != null) {
+ if (bc != null) {
NumberReplicas num = countNodes(block);
int curReplicas = num.liveReplicas();
int curExpectedReplicas = getReplication(block);
@@ -2679,7 +2677,7 @@ boolean isReplicationInProgress(DatanodeDescriptor srcNode) {
if ((curReplicas == 0) && (num.decommissionedReplicas() > 0)) {
decommissionOnlyReplicas++;
}
- if (fileINode.isUnderConstruction()) {
+ if (bc instanceof MutableBlockCollection) {
underReplicatedInOpenFiles++;
}
}
@@ -2782,12 +2780,11 @@ public void checkReplication(Block block, short numExpectedReplicas) {
/* get replication factor of a block */
private int getReplication(Block block) {
- INodeFile fileINode = blocksMap.getINode(block);
- if (fileINode == null) { // block does not belong to any file
+ BlockCollection bc = blocksMap.getBlockCollection(block);
+ if (bc == null) { // block does not belong to any file
return 0;
}
- assert !fileINode.isDirectory() : "Block cannot belong to a directory.";
- return fileINode.getReplication();
+ return bc.getReplication();
}
@@ -2859,12 +2856,12 @@ public long getMissingBlocksCount() {
return this.neededReplications.getCorruptBlockSize();
}
- public BlockInfo addINode(BlockInfo block, INodeFile iNode) {
- return blocksMap.addINode(block, iNode);
+ public BlockInfo addBlockCollection(BlockInfo block, BlockCollection bc) {
+ return blocksMap.addBlockCollection(block, bc);
}
- public INodeFile getINode(Block b) {
- return blocksMap.getINode(b);
+ public BlockCollection getBlockCollection(Block b) {
+ return blocksMap.getBlockCollection(b);
}
/** @return an iterator of the datanodes. */
@@ -3003,7 +3000,7 @@ public void clearQueues() {
private static class ReplicationWork {
private Block block;
- private INodeFile fileINode;
+ private BlockCollection bc;
private DatanodeDescriptor srcNode;
private List<DatanodeDescriptor> containingNodes;
@@ -3014,14 +3011,14 @@ private static class ReplicationWork {
private int priority;
public ReplicationWork(Block block,
- INodeFile fileINode,
+ BlockCollection bc,
DatanodeDescriptor srcNode,
List<DatanodeDescriptor> containingNodes,
List<DatanodeDescriptor> liveReplicaNodes,
int additionalReplRequired,
int priority) {
this.block = block;
- this.fileINode = fileINode;
+ this.bc = bc;
this.srcNode = srcNode;
this.containingNodes = containingNodes;
this.liveReplicaNodes = liveReplicaNodes;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index b333972a262..e1efae54193 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -29,7 +29,6 @@
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.util.ReflectionUtils;
@@ -111,11 +110,11 @@ public abstract DatanodeDescriptor[] chooseTarget(String srcPath,
* choose numOfReplicas data nodes for writer
* If not, return as many as we can.
* The base implemenatation extracts the pathname of the file from the
- * specified srcInode, but this could be a costly operation depending on the
+ * specified srcBC, but this could be a costly operation depending on the
* file system implementation. Concrete implementations of this class should
* override this method to avoid this overhead.
*
- * @param srcInode The inode of the file for which chooseTarget is being invoked.
+ * @param srcBC block collection of file for which chooseTarget is invoked.
* @param numOfReplicas additional number of replicas wanted.
* @param writer the writer's machine, null if not in the cluster.
* @param chosenNodes datanodes that have been chosen as targets.
@@ -123,13 +122,13 @@ public abstract DatanodeDescriptor[] chooseTarget(String srcPath,
* @return array of DatanodeDescriptor instances chosen as target
* and sorted as a pipeline.
*/
- DatanodeDescriptor[] chooseTarget(FSInodeInfo srcInode,
+ DatanodeDescriptor[] chooseTarget(BlockCollection srcBC,
int numOfReplicas,
DatanodeDescriptor writer,
List<DatanodeDescriptor> chosenNodes,
HashMap<Node, Node> excludedNodes,
long blocksize) {
- return chooseTarget(srcInode.getFullPathName(), numOfReplicas, writer,
+ return chooseTarget(srcBC.getName(), numOfReplicas, writer,
chosenNodes, excludedNodes, blocksize);
}
@@ -150,7 +149,7 @@ abstract public int verifyBlockPlacement(String srcPath,
* Decide whether deleting the specified replica of the block still makes
* the block conform to the configured block placement policy.
*
- * @param srcInode The inode of the file to which the block-to-be-deleted belongs
+ * @param srcBC block collection of file to which block-to-be-deleted belongs
* @param block The block to be deleted
* @param replicationFactor The required number of replicas for this block
* @param existingReplicas The replica locations of this block that are present
@@ -159,7 +158,7 @@ abstract public int verifyBlockPlacement(String srcPath,
listed in the previous parameter.
* @return the replica that is the best candidate for deletion
*/
- abstract public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo srcInode,
+ abstract public DatanodeDescriptor chooseReplicaToDelete(BlockCollection srcBC,
Block block,
short replicationFactor,
Collection<DatanodeDescriptor> existingReplicas,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 058d2e37aaa..a1e7a208ec7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -33,7 +33,6 @@
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;
@@ -547,7 +546,7 @@ public int verifyBlockPlacement(String srcPath,
}
@Override
- public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+ public DatanodeDescriptor chooseReplicaToDelete(BlockCollection bc,
Block block,
short replicationFactor,
Collection<DatanodeDescriptor> first,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
index e479954d42f..6757ef486b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
@@ -20,13 +20,12 @@
import java.util.Iterator;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.server.namenode.INodeFile;
import org.apache.hadoop.hdfs.util.GSet;
import org.apache.hadoop.hdfs.util.LightWeightGSet;
/**
* This class maintains the map from a block to its metadata.
- * block's metadata currently includes INode it belongs to and
+ * block's metadata currently includes blockCollection it belongs to and
* the datanodes that store the block.
*/
class BlocksMap {
@@ -93,21 +92,21 @@ void close() {
blocks = null;
}
- INodeFile getINode(Block b) {
+ BlockCollection getBlockCollection(Block b) {
BlockInfo info = blocks.get(b);
- return (info != null) ? info.getINode() : null;
+ return (info != null) ? info.getBlockCollection() : null;
}
/**
- * Add block b belonging to the specified file inode to the map.
+ * Add block b belonging to the specified block collection to the map.
*/
- BlockInfo addINode(BlockInfo b, INodeFile iNode) {
+ BlockInfo addBlockCollection(BlockInfo b, BlockCollection bc) {
BlockInfo info = blocks.get(b);
if (info != b) {
info = b;
blocks.put(info);
}
- info.setINode(iNode);
+ info.setBlockCollection(bc);
return info;
}
@@ -121,7 +120,7 @@ void removeBlock(Block block) {
if (blockInfo == null)
return;
- blockInfo.setINode(null);
+ blockInfo.setBlockCollection(null);
for(int idx = blockInfo.numNodes()-1; idx >= 0; idx--) {
DatanodeDescriptor dn = blockInfo.getDatanode(idx);
dn.removeBlock(blockInfo); // remove from the list and wipe the location
@@ -169,7 +168,7 @@ boolean removeNode(Block b, DatanodeDescriptor node) {
boolean removed = node.removeBlock(info);
if (info.getDatanode(0) == null // no datanodes left
- && info.getINode() == null) { // does not belong to a file
+ && info.getBlockCollection() == null) { // does not belong to a file
blocks.remove(b); // remove block from the map
}
return removed;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java
similarity index 50%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java
index 40a474aba77..2b5b3e4dd27 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/MutableBlockCollection.java
@@ -15,24 +15,30 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-package org.apache.hadoop.hdfs.server.namenode;
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.fs.ContentSummary;
/**
- * This interface is used used the pluggable block placement policy
- * to expose a few characteristics of an Inode.
+ * This interface is used by the block manager to expose a
+ * few characteristics of a collection of Block/BlockUnderConstruction.
*/
-@InterfaceAudience.Private
-public interface FSInodeInfo {
+public interface MutableBlockCollection extends BlockCollection {
+ /**
+ * Set block
+ */
+ public void setBlock(int idx, BlockInfo blk);
/**
- * a string representation of an inode
- *
- * @return the full pathname (from root) that this inode represents
+ * Convert the last block of the collection to an under-construction block.
+ * Set its locations.
*/
-
- public String getFullPathName() ;
+ public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
+ DatanodeDescriptor[] targets) throws IOException;
}
-
-
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index f018f53e731..10049e8871c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -235,6 +235,9 @@ void scheduleBlockReport(long delay) {
}
void reportBadBlocks(ExtendedBlock block) {
+ if (bpRegistration == null) {
+ return;
+ }
DatanodeInfo[] dnArr = { new DatanodeInfo(bpRegistration) };
LocatedBlock[] blocks = { new LocatedBlock(block, dnArr) };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 6bb78df6bc8..fdcfa569705 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -860,7 +860,7 @@ String getStorageId() {
*/
public String getDisplayName() {
// NB: our DatanodeID may not be set yet
- return hostName + ":" + getIpcPort();
+ return hostName + ":" + getXferPort();
}
/**
@@ -877,7 +877,6 @@ public InetSocketAddress getXferAddress() {
/**
* @return the datanode's IPC port
*/
- @VisibleForTesting
public int getIpcPort() {
return ipcServer.getListenerAddress().getPort();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index 995840066db..6c280d8767d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -60,6 +60,7 @@
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.SocketInputWrapper;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.DataChecksum;
@@ -83,13 +84,30 @@ class DataXceiver extends Receiver implements Runnable {
private final DataXceiverServer dataXceiverServer;
private long opStartTime; //the start time of receiving an Op
+ private final SocketInputWrapper socketInputWrapper;
+
+ /**
+ * Client Name used in previous operation. Not available on first request
+ * on the socket.
+ */
+ private String previousOpClientName;
- public DataXceiver(Socket s, DataNode datanode,
+ public static DataXceiver create(Socket s, DataNode dn,
+ DataXceiverServer dataXceiverServer) throws IOException {
+
+ SocketInputWrapper iw = NetUtils.getInputStream(s);
+ return new DataXceiver(s, iw, dn, dataXceiverServer);
+ }
+
+ private DataXceiver(Socket s,
+ SocketInputWrapper socketInput,
+ DataNode datanode,
DataXceiverServer dataXceiverServer) throws IOException {
super(new DataInputStream(new BufferedInputStream(
- NetUtils.getInputStream(s), HdfsConstants.SMALL_BUFFER_SIZE)));
+ socketInput, HdfsConstants.SMALL_BUFFER_SIZE)));
this.s = s;
+ this.socketInputWrapper = socketInput;
this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
this.datanode = datanode;
this.dnConf = datanode.getDnConf();
@@ -110,7 +128,11 @@ public DataXceiver(Socket s, DataNode datanode,
*/
private void updateCurrentThreadName(String status) {
StringBuilder sb = new StringBuilder();
- sb.append("DataXceiver for client ").append(remoteAddress);
+ sb.append("DataXceiver for client ");
+ if (previousOpClientName != null) {
+ sb.append(previousOpClientName).append(" at ");
+ }
+ sb.append(remoteAddress);
if (status != null) {
sb.append(" [").append(status).append("]");
}
@@ -128,8 +150,6 @@ public void run() {
Op op = null;
dataXceiverServer.childSockets.add(s);
try {
- int stdTimeout = s.getSoTimeout();
-
// We process requests in a loop, and stay around for a short timeout.
// This optimistic behaviour allows the other end to reuse connections.
// Setting keepalive timeout to 0 disable this behavior.
@@ -139,7 +159,9 @@ public void run() {
try {
if (opsProcessed != 0) {
assert dnConf.socketKeepaliveTimeout > 0;
- s.setSoTimeout(dnConf.socketKeepaliveTimeout);
+ socketInputWrapper.setTimeout(dnConf.socketKeepaliveTimeout);
+ } else {
+ socketInputWrapper.setTimeout(dnConf.socketTimeout);
}
op = readOp();
} catch (InterruptedIOException ignored) {
@@ -160,7 +182,7 @@ public void run() {
// restore normal timeout
if (opsProcessed != 0) {
- s.setSoTimeout(stdTimeout);
+ s.setSoTimeout(dnConf.socketTimeout);
}
opStartTime = now();
@@ -190,6 +212,8 @@ public void readBlock(final ExtendedBlock block,
final String clientName,
final long blockOffset,
final long length) throws IOException {
+ previousOpClientName = clientName;
+
OutputStream baseStream = NetUtils.getOutputStream(s,
dnConf.socketWriteTimeout);
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
@@ -283,7 +307,8 @@ public void writeBlock(final ExtendedBlock block,
final long maxBytesRcvd,
final long latestGenerationStamp,
DataChecksum requestedChecksum) throws IOException {
- updateCurrentThreadName("Receiving block " + block + " client=" + clientname);
+ previousOpClientName = clientname;
+ updateCurrentThreadName("Receiving block " + block);
final boolean isDatanode = clientname.length() == 0;
final boolean isClient = !isDatanode;
final boolean isTransfer = stage == BlockConstructionStage.TRANSFER_RBW
@@ -490,7 +515,7 @@ public void transferBlock(final ExtendedBlock blk,
final DatanodeInfo[] targets) throws IOException {
checkAccess(null, true, blk, blockToken,
Op.TRANSFER_BLOCK, BlockTokenSecretManager.AccessMode.COPY);
-
+ previousOpClientName = clientName;
updateCurrentThreadName(Op.TRANSFER_BLOCK + " " + blk);
final DataOutputStream out = new DataOutputStream(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index f32b2968f52..bb0f7fd81b4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -135,6 +135,7 @@ public void run() {
try {
s = ss.accept();
s.setTcpNoDelay(true);
+ // Timeouts are set within DataXceiver.run()
// Make sure the xceiver count is not exceeded
int curXceiverCount = datanode.getXceiverCount();
@@ -144,7 +145,8 @@ public void run() {
+ maxXceiverCount);
}
- new Daemon(datanode.threadGroup, new DataXceiver(s, datanode, this))
+ new Daemon(datanode.threadGroup,
+ DataXceiver.create(s, datanode, this))
.start();
} catch (SocketTimeoutException ignored) {
// wake up to see if should continue to run
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
index f7da29b4c9d..2d1ff6437b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
@@ -29,6 +29,7 @@
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
import org.apache.hadoop.http.HttpServer;
+import org.apache.hadoop.security.UserGroupInformation;
import org.mortbay.jetty.nio.SelectChannelConnector;
/**
@@ -60,10 +61,7 @@ public SecureResources(ServerSocket streamingSocket,
@Override
public void init(DaemonContext context) throws Exception {
System.err.println("Initializing secure datanode resources");
- // We should only start up a secure datanode in a Kerberos-secured cluster
- Configuration conf = new Configuration(); // Skip UGI method to not log in
- if(!conf.get(HADOOP_SECURITY_AUTHENTICATION).equals("kerberos"))
- throw new RuntimeException("Cannot start secure datanode in unsecure cluster");
+ Configuration conf = new Configuration();
// Stash command-line arguments for regular datanode
args = context.getArguments();
@@ -98,7 +96,8 @@ public void init(DaemonContext context) throws Exception {
System.err.println("Successfully obtained privileged resources (streaming port = "
+ ss + " ) (http listener port = " + listener.getConnection() +")");
- if (ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) {
+ if ((ss.getLocalPort() >= 1023 || listener.getPort() >= 1023) &&
+ UserGroupInformation.isSecurityEnabled()) {
throw new RuntimeException("Cannot start secure datanode with unprivileged ports");
}
System.err.println("Opened streaming server at " + streamingAddr);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index ab0f4c4dddd..d4239288a4d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -309,7 +309,7 @@ INodeDirectory addToParent(byte[] src, INodeDirectory parentINode,
INodeFile newF = (INodeFile)newNode;
BlockInfo[] blocks = newF.getBlocks();
for (int i = 0; i < blocks.length; i++) {
- newF.setBlock(i, getBlockManager().addINode(blocks[i], newF));
+ newF.setBlock(i, getBlockManager().addBlockCollection(blocks[i], newF));
}
}
} finally {
@@ -346,7 +346,7 @@ BlockInfo addBlock(String path,
fileINode.getReplication(),
BlockUCState.UNDER_CONSTRUCTION,
targets);
- getBlockManager().addINode(blockInfo, fileINode);
+ getBlockManager().addBlockCollection(blockInfo, fileINode);
fileINode.addBlock(blockInfo);
if(NameNode.stateChangeLog.isDebugEnabled()) {
@@ -1127,7 +1127,7 @@ public void replaceNode(String path, INodeFile oldnode, INodeFile newnode)
int index = 0;
for (BlockInfo b : newnode.getBlocks()) {
- BlockInfo info = getBlockManager().addINode(b, newnode);
+ BlockInfo info = getBlockManager().addBlockCollection(b, newnode);
newnode.setBlock(index, info); // inode refers to the block in BlocksMap
index++;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
index 8f2b107e798..76c661d8297 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java
@@ -601,7 +601,7 @@ private void updateBlocks(FSDirectory fsDir, BlockListUpdatingOp op,
// OP_ADD operations as each block is allocated.
newBI = new BlockInfo(newBlock, file.getReplication());
}
- fsNamesys.getBlockManager().addINode(newBI, file);
+ fsNamesys.getBlockManager().addBlockCollection(newBI, file);
file.addBlock(newBI);
fsNamesys.getBlockManager().processQueuedMessagesForBlock(newBlock);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
index 56a610f101e..9f7742cc674 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
@@ -203,6 +203,10 @@ T setBlockSize(long blockSize) {
}
T setBlocks(Block[] blocks) {
+ if (blocks.length > MAX_BLOCKS) {
+ throw new RuntimeException("Can't have more than " + MAX_BLOCKS +
+ " in an AddCloseOp.");
+ }
this.blocks = blocks;
return (T)this;
}
@@ -296,10 +300,18 @@ void readFields(DataInputStream in, int logVersion)
}
}
+ static final public int MAX_BLOCKS = 1024 * 1024 * 64;
+
private static Block[] readBlocks(
DataInputStream in,
int logVersion) throws IOException {
int numBlocks = in.readInt();
+ if (numBlocks < 0) {
+ throw new IOException("invalid negative number of blocks");
+ } else if (numBlocks > MAX_BLOCKS) {
+ throw new IOException("invalid number of blocks: " + numBlocks +
+ ". The maximum number of blocks per file is " + MAX_BLOCKS);
+ }
Block[] blocks = new Block[numBlocks];
for (int i = 0; i < numBlocks; i++) {
Block blk = new Block();
@@ -579,6 +591,7 @@ static class ConcatDeleteOp extends FSEditLogOp {
String trg;
String[] srcs;
long timestamp;
+ final static public int MAX_CONCAT_SRC = 1024 * 1024;
private ConcatDeleteOp() {
super(OP_CONCAT_DELETE);
@@ -594,7 +607,12 @@ ConcatDeleteOp setTarget(String trg) {
}
ConcatDeleteOp setSources(String[] srcs) {
+ if (srcs.length > MAX_CONCAT_SRC) {
+ throw new RuntimeException("ConcatDeleteOp can only have " +
+ MAX_CONCAT_SRC + " sources at most.");
+ }
this.srcs = srcs;
+
return this;
}
@@ -624,8 +642,8 @@ void readFields(DataInputStream in, int logVersion)
if (!LayoutVersion.supports(Feature.EDITLOG_OP_OPTIMIZATION, logVersion)) {
this.length = in.readInt();
if (length < 3) { // trg, srcs.., timestamp
- throw new IOException("Incorrect data format. "
- + "Concat delete operation.");
+ throw new IOException("Incorrect data format " +
+ "for ConcatDeleteOp.");
}
}
this.trg = FSImageSerialization.readString(in);
@@ -635,6 +653,15 @@ void readFields(DataInputStream in, int logVersion)
} else {
srcSize = this.length - 1 - 1; // trg and timestamp
}
+ if (srcSize < 0) {
+ throw new IOException("Incorrect data format. "
+ + "ConcatDeleteOp cannot have a negative number of data " +
+ " sources.");
+ } else if (srcSize > MAX_CONCAT_SRC) {
+ throw new IOException("Incorrect data format. "
+ + "ConcatDeleteOp can have at most " + MAX_CONCAT_SRC +
+ " sources, but we tried to have " + (length - 3) + " sources.");
+ }
this.srcs = new String [srcSize];
for(int i=0; i<srcSize; i++) {
Collection<CorruptFileBlockInfo> listCorruptFileBlocks(String path,
while (blkIterator.hasNext()) {
Block blk = blkIterator.next();
- INode inode = blockManager.getINode(blk);
+ INode inode = (INodeFile) blockManager.getBlockCollection(blk);
skip++;
if (inode != null && blockManager.countNodes(blk).liveReplicas() == 0) {
String src = FSDirectory.getFullPathName(inode);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
index 3b29a70f342..eab5f71bad0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
@@ -27,6 +27,8 @@
import javax.servlet.http.HttpServlet;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.commons.logging.Log;
@@ -34,7 +36,6 @@
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -83,11 +84,11 @@ public void doGet(final HttpServletRequest request,
(Configuration)getServletContext().getAttribute(JspHelper.CURRENT_CONF);
if(UserGroupInformation.isSecurityEnabled() &&
- !isValidRequestor(request.getRemoteUser(), conf)) {
+ !isValidRequestor(request.getUserPrincipal().getName(), conf)) {
response.sendError(HttpServletResponse.SC_FORBIDDEN,
"Only Namenode and Secondary Namenode may access this servlet");
LOG.warn("Received non-NN/SNN request for image or edits from "
- + request.getRemoteHost());
+ + request.getUserPrincipal().getName() + " at " + request.getRemoteHost());
return;
}
@@ -156,15 +157,10 @@ public Void run() throws Exception {
}
// issue a HTTP get request to download the new fsimage
- MD5Hash downloadImageDigest = reloginIfNecessary().doAs(
- new PrivilegedExceptionAction<MD5Hash>() {
- @Override
- public MD5Hash run() throws Exception {
- return TransferFsImage.downloadImageToStorage(
+ MD5Hash downloadImageDigest =
+ TransferFsImage.downloadImageToStorage(
parsedParams.getInfoServer(), txid,
nnImage.getStorage(), true);
- }
- });
nnImage.saveDigestAndRenameCheckpointImage(txid, downloadImageDigest);
// Now that we have a new checkpoint, we might be able to
@@ -176,18 +172,6 @@ public MD5Hash run() throws Exception {
}
return null;
}
-
- // We may have lost our ticket since the last time we tried to open
- // an http connection, so log in just in case.
- private UserGroupInformation reloginIfNecessary() throws IOException {
- // This method is only called on the NN, therefore it is safe to
- // use these key values.
- return UserGroupInformation.loginUserFromKeytabAndReturnUGI(
- SecurityUtil.getServerPrincipal(conf
- .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
- NameNode.getAddress(conf).getHostName()),
- conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY));
- }
});
} catch (Throwable t) {
@@ -232,18 +216,10 @@ static boolean isValidRequestor(String remoteUser, Configuration conf)
Set<String> validRequestors = new HashSet<String>();
- validRequestors.add(
- SecurityUtil.getServerPrincipal(conf
- .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), NameNode
- .getAddress(conf).getHostName()));
validRequestors.add(
SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY), NameNode
.getAddress(conf).getHostName()));
- validRequestors.add(
- SecurityUtil.getServerPrincipal(conf
- .get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
- SecondaryNameNode.getHttpAddress(conf).getHostName()));
validRequestors.add(
SecurityUtil.getServerPrincipal(conf
.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_USER_NAME_KEY),
@@ -251,10 +227,6 @@ static boolean isValidRequestor(String remoteUser, Configuration conf)
if (HAUtil.isHAEnabled(conf, DFSUtil.getNamenodeNameServiceId(conf))) {
Configuration otherNnConf = HAUtil.getConfForOtherNode(conf);
- validRequestors.add(
- SecurityUtil.getServerPrincipal(otherNnConf
- .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
- NameNode.getAddress(otherNnConf).getHostName()));
validRequestors.add(
SecurityUtil.getServerPrincipal(otherNnConf
.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
@@ -263,11 +235,11 @@ static boolean isValidRequestor(String remoteUser, Configuration conf)
for(String v : validRequestors) {
if(v != null && v.equals(remoteUser)) {
- if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is allowing: " + remoteUser);
+ if(LOG.isInfoEnabled()) LOG.info("GetImageServlet allowing: " + remoteUser);
return true;
}
}
- if(LOG.isDebugEnabled()) LOG.debug("isValidRequestor is rejecting: " + remoteUser);
+ if(LOG.isInfoEnabled()) LOG.info("GetImageServlet rejecting: " + remoteUser);
return false;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index cdad315f7a4..e940b61ab9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -30,13 +30,15 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.util.StringUtils;
+import com.google.common.primitives.SignedBytes;
+
/**
* We keep an in-memory representation of the file/block hierarchy.
* This is a base INode class containing common fields for file and
* directory inodes.
*/
@InterfaceAudience.Private
-abstract class INode implements Comparable<byte[]>, FSInodeInfo {
+abstract class INode implements Comparable<byte[]> {
/*
* The inode name is in java UTF8 encoding;
* The name in HdfsFileStatus should keep the same encoding as this.
@@ -143,8 +145,7 @@ protected void setPermissionStatus(PermissionStatus ps) {
protected PermissionStatus getPermissionStatus() {
return new PermissionStatus(getUserName(),getGroupName(),getFsPermission());
}
- private synchronized void updatePermissionStatus(
- PermissionStatusFormat f, long n) {
+ private void updatePermissionStatus(PermissionStatusFormat f, long n) {
permission = f.combine(n, permission);
}
/** Get user name */
@@ -263,7 +264,6 @@ void setLocalName(byte[] name) {
this.name = name;
}
- @Override
public String getFullPathName() {
// Get the full path name of this inode.
return FSDirectory.getFullPathName(this);
@@ -400,48 +400,30 @@ boolean removeNode() {
}
}
- //
- // Comparable interface
- //
- public int compareTo(byte[] o) {
- return compareBytes(name, o);
+ private static final byte[] EMPTY_BYTES = {};
+
+ @Override
+ public final int compareTo(byte[] bytes) {
+ final byte[] left = name == null? EMPTY_BYTES: name;
+ final byte[] right = bytes == null? EMPTY_BYTES: bytes;
+ return SignedBytes.lexicographicalComparator().compare(left, right);
}
- public boolean equals(Object o) {
- if (!(o instanceof INode)) {
+ @Override
+ public final boolean equals(Object that) {
+ if (this == that) {
+ return true;
+ }
+ if (that == null || !(that instanceof INode)) {
return false;
}
- return Arrays.equals(this.name, ((INode)o).name);
+ return Arrays.equals(this.name, ((INode)that).name);
}
- public int hashCode() {
+ @Override
+ public final int hashCode() {
return Arrays.hashCode(this.name);
}
-
- //
- // static methods
- //
- /**
- * Compare two byte arrays.
- *
- * @return a negative integer, zero, or a positive integer
- * as defined by {@link #compareTo(byte[])}.
- */
- static int compareBytes(byte[] a1, byte[] a2) {
- if (a1==a2)
- return 0;
- int len1 = (a1==null ? 0 : a1.length);
- int len2 = (a2==null ? 0 : a2.length);
- int n = Math.min(len1, len2);
- byte b1, b2;
- for (int i=0; i<n; i++) {
int collectSubtreeBlocksAndClear(List<Block> v) {
if(blocks != null && v != null) {
for (BlockInfo blk : blocks) {
v.add(blk);
- blk.setINode(null);
+ blk.setBlockCollection(null);
}
}
blocks = null;
return 1;
}
+
+ public String getName() {
+ // Get the full path name of this inode.
+ return getFullPathName();
+ }
+
@Override
long[] computeContentSummary(long[] summary) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
index c5c47fd6461..66e33e077d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
@@ -25,13 +25,15 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import org.apache.hadoop.hdfs.server.blockmanagement.MutableBlockCollection;
import com.google.common.base.Joiner;
/**
* I-node for file being written.
*/
-public class INodeFileUnderConstruction extends INodeFile {
+public class INodeFileUnderConstruction extends INodeFile
+ implements MutableBlockCollection {
private String clientName; // lease holder
private final String clientMachine;
private final DatanodeDescriptor clientNode; // if client is a cluster node too.
@@ -154,7 +156,7 @@ public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
BlockInfoUnderConstruction ucBlock =
lastBlock.convertToBlockUnderConstruction(
BlockUCState.UNDER_CONSTRUCTION, targets);
- ucBlock.setINode(this);
+ ucBlock.setBlockCollection(this);
setBlock(numBlocks()-1, ucBlock);
return ucBlock;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
index 7779a2fd447..7d26428a875 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
@@ -174,10 +174,8 @@ public static enum OperationCategory {
DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY,
DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
DFS_NAMENODE_HTTP_ADDRESS_KEY,
- DFS_NAMENODE_HTTPS_ADDRESS_KEY,
DFS_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY,
- DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY,
DFS_NAMENODE_BACKUP_ADDRESS_KEY,
DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY,
@@ -229,6 +227,7 @@ public long getProtocolVersion(String protocol,
private final boolean haEnabled;
private final HAContext haContext;
protected boolean allowStaleStandbyReads;
+ private Runtime runtime = Runtime.getRuntime();
/** httpServer */
@@ -382,8 +381,9 @@ public static InetSocketAddress getHttpAddress(Configuration conf) {
}
protected void setHttpServerAddress(Configuration conf) {
- conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY,
- NetUtils.getHostPortString(getHttpAddress()));
+ String hostPort = NetUtils.getHostPortString(getHttpAddress());
+ conf.set(DFS_NAMENODE_HTTP_ADDRESS_KEY, hostPort);
+ LOG.info("Web-server up at: " + hostPort);
}
protected void loadNamesystem(Configuration conf) throws IOException {
@@ -503,11 +503,16 @@ private void stopCommonServices() {
}
private void startTrashEmptier(Configuration conf) throws IOException {
- long trashInterval
- = conf.getLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
- CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
- if(trashInterval == 0)
+ long trashInterval = conf.getLong(
+ CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY,
+ CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
+ if (trashInterval == 0) {
return;
+ } else if (trashInterval < 0) {
+ throw new IOException("Cannot start tresh emptier with negative interval."
+ + " Set " + CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY + " to a"
+ + " positive value.");
+ }
this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
this.emptier.setDaemon(true);
this.emptier.start();
@@ -1151,23 +1156,21 @@ public static NameNode createNameNode(String argv[], Configuration conf)
*/
public static void initializeGenericKeys(Configuration conf,
String nameserviceId, String namenodeId) {
- if ((nameserviceId == null || nameserviceId.isEmpty()) &&
- (namenodeId == null || namenodeId.isEmpty())) {
- return;
+ if ((nameserviceId != null && !nameserviceId.isEmpty()) ||
+ (namenodeId != null && !namenodeId.isEmpty())) {
+ if (nameserviceId != null) {
+ conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
+ }
+ if (namenodeId != null) {
+ conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
+ }
+
+ DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
+ NAMENODE_SPECIFIC_KEYS);
+ DFSUtil.setGenericConf(conf, nameserviceId, null,
+ NAMESERVICE_SPECIFIC_KEYS);
}
- if (nameserviceId != null) {
- conf.set(DFS_FEDERATION_NAMESERVICE_ID, nameserviceId);
- }
- if (namenodeId != null) {
- conf.set(DFS_HA_NAMENODE_ID_KEY, namenodeId);
- }
-
- DFSUtil.setGenericConf(conf, nameserviceId, namenodeId,
- NAMENODE_SPECIFIC_KEYS);
- DFSUtil.setGenericConf(conf, nameserviceId, null,
- NAMESERVICE_SPECIFIC_KEYS);
-
if (conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY) != null) {
URI defaultUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://"
+ conf.get(DFS_NAMENODE_RPC_ADDRESS_KEY));
@@ -1262,14 +1265,37 @@ synchronized HAServiceState getServiceState() {
}
return state.getServiceState();
}
+
+ @VisibleForTesting
+ public synchronized void setRuntimeForTesting(Runtime runtime) {
+ this.runtime = runtime;
+ }
/**
- * Class used as expose {@link NameNode} as context to {@link HAState}
+ * Shutdown the NN immediately in an ungraceful way. Used when it would be
+ * unsafe for the NN to continue operating, e.g. during a failed HA state
+ * transition.
*
- * TODO(HA):
- * When entering and exiting state, on failing to start services,
- * appropriate action is needed todo either shutdown the node or recover
- * from failure.
+ * @param t exception which warrants the shutdown. Printed to the NN log
+ * before exit.
+ * @throws ServiceFailedException thrown only for testing.
+ */
+ private synchronized void doImmediateShutdown(Throwable t)
+ throws ServiceFailedException {
+ String message = "Error encountered requiring NN shutdown. " +
+ "Shutting down immediately.";
+ try {
+ LOG.fatal(message, t);
+ } catch (Throwable ignored) {
+ // This is unlikely to happen, but there's nothing we can do if it does.
+ }
+ runtime.exit(1);
+ // This code is only reached during testing, when runtime is stubbed out.
+ throw new ServiceFailedException(message, t);
+ }
+
+ /**
+ * Class used to expose {@link NameNode} as context to {@link HAState}
*/
protected class NameNodeHAContext implements HAContext {
@Override
@@ -1284,32 +1310,52 @@ public HAState getState() {
@Override
public void startActiveServices() throws IOException {
- namesystem.startActiveServices();
- startTrashEmptier(conf);
+ try {
+ namesystem.startActiveServices();
+ startTrashEmptier(conf);
+ } catch (Throwable t) {
+ doImmediateShutdown(t);
+ }
}
@Override
public void stopActiveServices() throws IOException {
- if (namesystem != null) {
- namesystem.stopActiveServices();
+ try {
+ if (namesystem != null) {
+ namesystem.stopActiveServices();
+ }
+ stopTrashEmptier();
+ } catch (Throwable t) {
+ doImmediateShutdown(t);
}
- stopTrashEmptier();
}
@Override
public void startStandbyServices() throws IOException {
- namesystem.startStandbyServices(conf);
+ try {
+ namesystem.startStandbyServices(conf);
+ } catch (Throwable t) {
+ doImmediateShutdown(t);
+ }
}
@Override
public void prepareToStopStandbyServices() throws ServiceFailedException {
- namesystem.prepareToStopStandbyServices();
+ try {
+ namesystem.prepareToStopStandbyServices();
+ } catch (Throwable t) {
+ doImmediateShutdown(t);
+ }
}
@Override
public void stopStandbyServices() throws IOException {
- if (namesystem != null) {
- namesystem.stopStandbyServices();
+ try {
+ if (namesystem != null) {
+ namesystem.stopStandbyServices();
+ }
+ } catch (Throwable t) {
+ doImmediateShutdown(t);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index c40a5b29728..2e62b8a1093 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY;
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT;
@@ -43,6 +44,7 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList;
/**
@@ -78,127 +80,101 @@ private String getDefaultServerPrincipal() throws IOException {
conf.get(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY),
nn.getNameNodeAddress().getHostName());
}
-
+
public void start() throws IOException {
final String infoHost = bindAddress.getHostName();
-
- if(UserGroupInformation.isSecurityEnabled()) {
- String httpsUser = SecurityUtil.getServerPrincipal(conf
- .get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY), infoHost);
- if (httpsUser == null) {
- LOG.warn(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY
- + " not defined in config. Starting http server as "
- + getDefaultServerPrincipal()
- + ": Kerberized SSL may be not function correctly.");
- } else {
- // Kerberized SSL servers must be run from the host principal...
- LOG.info("Logging in as " + httpsUser + " to start http server.");
- SecurityUtil.login(conf, DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY,
- DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY, infoHost);
- }
- }
+ int infoPort = bindAddress.getPort();
- UserGroupInformation ugi = UserGroupInformation.getLoginUser();
- try {
- this.httpServer = ugi.doAs(new PrivilegedExceptionAction<HttpServer>() {
- @Override
- public HttpServer run() throws IOException, InterruptedException {
- int infoPort = bindAddress.getPort();
- httpServer = new HttpServer("hdfs", infoHost, infoPort,
- infoPort == 0, conf,
- new AccessControlList(conf.get(DFSConfigKeys.DFS_ADMIN, " "))) {
- {
- if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
- //add SPNEGO authentication filter for webhdfs
- final String name = "SPNEGO";
- final String classname = AuthFilter.class.getName();
- final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
- Map<String, String> params = getAuthFilterParams(conf);
- defineFilter(webAppContext, name, classname, params,
- new String[]{pathSpec});
- LOG.info("Added filter '" + name + "' (class=" + classname + ")");
-
- // add webhdfs packages
- addJerseyResourcePackage(
- NamenodeWebHdfsMethods.class.getPackage().getName()
- + ";" + Param.class.getPackage().getName(), pathSpec);
- }
+ httpServer = new HttpServer("hdfs", infoHost, infoPort,
+ infoPort == 0, conf,
+ new AccessControlList(conf.get(DFS_ADMIN, " "))) {
+ {
+ // Add SPNEGO support to NameNode
+ if (UserGroupInformation.isSecurityEnabled()) {
+ Map<String, String> params = new HashMap<String, String>();
+ String principalInConf = conf.get(
+ DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
+ if (principalInConf != null && !principalInConf.isEmpty()) {
+ params.put("kerberos.principal",
+ SecurityUtil.getServerPrincipal(principalInConf, infoHost));
+ String httpKeytab = conf.get(DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY);
+ if (httpKeytab != null && !httpKeytab.isEmpty()) {
+ params.put("kerberos.keytab", httpKeytab);
}
- private Map<String, String> getAuthFilterParams(Configuration conf)
- throws IOException {
- Map<String, String> params = new HashMap<String, String>();
- String principalInConf = conf
- .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
- if (principalInConf != null && !principalInConf.isEmpty()) {
- params
- .put(
- DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
- SecurityUtil.getServerPrincipal(principalInConf,
- infoHost));
- }
- String httpKeytab = conf
- .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
- if (httpKeytab != null && !httpKeytab.isEmpty()) {
- params.put(
- DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
- httpKeytab);
- }
- return params;
- }
- };
+ params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
- boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false);
- boolean useKrb = UserGroupInformation.isSecurityEnabled();
- if (certSSL || useKrb) {
- boolean needClientAuth = conf.getBoolean(
- DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_KEY,
- DFSConfigKeys.DFS_CLIENT_HTTPS_NEED_AUTH_DEFAULT);
- InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(conf
- .get(DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_KEY,
- DFSConfigKeys.DFS_NAMENODE_HTTPS_ADDRESS_DEFAULT));
- Configuration sslConf = new HdfsConfiguration(false);
- if (certSSL) {
- sslConf.addResource(conf.get(DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_KEY,
- DFS_SERVER_HTTPS_KEYSTORE_RESOURCE_DEFAULT));
- }
- httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth,
- useKrb);
- // assume same ssl port for all datanodes
- InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(
- conf.get(DFS_DATANODE_HTTPS_ADDRESS_KEY,
- infoHost + ":" + DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT));
- httpServer.setAttribute(DFSConfigKeys.DFS_DATANODE_HTTPS_PORT_KEY,
- datanodeSslPort.getPort());
+ defineFilter(webAppContext, SPNEGO_FILTER,
+ AuthenticationFilter.class.getName(), params, null);
}
- httpServer.setAttribute(NAMENODE_ATTRIBUTE_KEY, nn);
- httpServer.setAttribute(NAMENODE_ADDRESS_ATTRIBUTE_KEY,
- nn.getNameNodeAddress());
- httpServer.setAttribute(FSIMAGE_ATTRIBUTE_KEY, nn.getFSImage());
- httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
- setupServlets(httpServer, conf);
- httpServer.start();
-
- // The web-server port can be ephemeral... ensure we have the correct
- // info
- infoPort = httpServer.getPort();
- httpAddress = new InetSocketAddress(infoHost, infoPort);
- LOG.info(nn.getRole() + " Web-server up at: " + httpAddress);
- return httpServer;
}
- });
- } catch (InterruptedException e) {
- throw new IOException(e);
- } finally {
- if(UserGroupInformation.isSecurityEnabled() &&
- conf.get(DFSConfigKeys.DFS_NAMENODE_KRB_HTTPS_USER_NAME_KEY) != null) {
- // Go back to being the correct Namenode principal
- LOG.info("Logging back in as NameNode user following http server start");
- nn.loginAsNameNodeUser(conf);
+ if (WebHdfsFileSystem.isEnabled(conf, LOG)) {
+ //add SPNEGO authentication filter for webhdfs
+ final String name = "SPNEGO";
+ final String classname = AuthFilter.class.getName();
+ final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+ Map<String, String> params = getAuthFilterParams(conf);
+ defineFilter(webAppContext, name, classname, params,
+ new String[]{pathSpec});
+ LOG.info("Added filter '" + name + "' (class=" + classname + ")");
+
+ // add webhdfs packages
+ addJerseyResourcePackage(
+ NamenodeWebHdfsMethods.class.getPackage().getName()
+ + ";" + Param.class.getPackage().getName(), pathSpec);
+ }
}
+
+ private Map<String, String> getAuthFilterParams(Configuration conf)
+ throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ String principalInConf = conf
+ .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
+ if (principalInConf != null && !principalInConf.isEmpty()) {
+ params
+ .put(
+ DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+ SecurityUtil.getServerPrincipal(principalInConf,
+ bindAddress.getHostName()));
+ }
+ String httpKeytab = conf
+ .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
+ if (httpKeytab != null && !httpKeytab.isEmpty()) {
+ params.put(
+ DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+ httpKeytab);
+ }
+ return params;
+ }
+ };
+
+ boolean certSSL = conf.getBoolean("dfs.https.enable", false);
+ if (certSSL) {
+ boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false);
+ InetSocketAddress secInfoSocAddr = NetUtils.createSocketAddr(infoHost + ":" + conf.get(
+ "dfs.https.port", infoHost + ":" + 0));
+ Configuration sslConf = new Configuration(false);
+ if (certSSL) {
+ sslConf.addResource(conf.get("dfs.https.server.keystore.resource",
+ "ssl-server.xml"));
+ }
+ httpServer.addSslListener(secInfoSocAddr, sslConf, needClientAuth);
+ // assume same ssl port for all datanodes
+ InetSocketAddress datanodeSslPort = NetUtils.createSocketAddr(conf.get(
+ "dfs.datanode.https.address", infoHost + ":" + 50475));
+ httpServer.setAttribute("datanode.https.port", datanodeSslPort
+ .getPort());
}
+ httpServer.setAttribute("name.node", nn);
+ httpServer.setAttribute("name.node.address", bindAddress);
+ httpServer.setAttribute("name.system.image", nn.getFSImage());
+ httpServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+ setupServlets(httpServer, conf);
+ httpServer.start();
+ httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort());
}
-
+
+
public void stop() throws Exception {
if (httpServer != null) {
httpServer.stop();
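Editor's sketch (not part of the patch) of the filter wiring added above: the SPNEGO filter takes its principal from dfs.web.authentication.kerberos.principal and resolves the _HOST token against the HTTP server's bind host via SecurityUtil.getServerPrincipal. The hostname below is a placeholder, and the usual org.apache.hadoop imports are assumed.

  // Illustrative only; "nn01.example.com" is a hypothetical bind host.
  Configuration conf = new HdfsConfiguration();
  String principal =
      conf.get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
  Map<String, String> params = new HashMap<String, String>();
  if (principal != null && !principal.isEmpty()) {
    // getServerPrincipal() substitutes _HOST with the local hostname, so a
    // single value such as HTTP/_HOST@EXAMPLE.COM works on every node.
    params.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
        SecurityUtil.getServerPrincipal(principal, "nn01.example.com"));
  }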
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 2dfa59751ff..f284aaa6702 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -734,7 +734,7 @@ static class XMLBlockInfo {
this.inode = null;
} else {
this.block = new Block(blockId);
- this.inode = blockManager.getINode(block);
+ this.inode = (INodeFile) blockManager.getBlockCollection(block);
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
index 2ddb9f85a6e..7c02c644da8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
@@ -25,8 +25,10 @@
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.Date;
+import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.CommandLineParser;
@@ -44,6 +46,7 @@
import org.apache.hadoop.fs.FileSystem;
import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.hdfs.NameNodeProxies;
@@ -63,9 +66,9 @@
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.Krb5AndCertsSslSocketConnector;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.util.Daemon;
@@ -108,7 +111,6 @@ public class SecondaryNameNode implements Runnable {
private volatile boolean shouldRun;
private HttpServer infoServer;
private int infoPort;
- private int imagePort;
private String infoBindAddress;
private Collection<URI> checkpointDirs;
@@ -229,63 +231,47 @@ private void initialize(final Configuration conf,
// Initialize other scheduling parameters from the configuration
checkpointConf = new CheckpointConf(conf);
-
- // initialize the webserver for uploading files.
- // Kerberized SSL servers must be run from the host principal...
- UserGroupInformation httpUGI =
- UserGroupInformation.loginUserFromKeytabAndReturnUGI(
- SecurityUtil.getServerPrincipal(conf
- .get(DFS_SECONDARY_NAMENODE_KRB_HTTPS_USER_NAME_KEY),
- infoBindAddress),
- conf.get(DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY));
- try {
- infoServer = httpUGI.doAs(new PrivilegedExceptionAction<HttpServer>() {
- @Override
- public HttpServer run() throws IOException, InterruptedException {
- LOG.info("Starting web server as: " +
- UserGroupInformation.getCurrentUser().getUserName());
- int tmpInfoPort = infoSocAddr.getPort();
- infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
- tmpInfoPort == 0, conf,
- new AccessControlList(conf.get(DFS_ADMIN, " ")));
-
- if(UserGroupInformation.isSecurityEnabled()) {
- SecurityUtil.initKrb5CipherSuites();
- InetSocketAddress secInfoSocAddr =
- NetUtils.createSocketAddr(infoBindAddress + ":"+ conf.getInt(
- DFS_NAMENODE_SECONDARY_HTTPS_PORT_KEY,
- DFS_NAMENODE_SECONDARY_HTTPS_PORT_DEFAULT));
- imagePort = secInfoSocAddr.getPort();
- infoServer.addSslListener(secInfoSocAddr, conf, false, true);
+ // initialize the webserver for uploading files.
+ int tmpInfoPort = infoSocAddr.getPort();
+ infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort,
+ tmpInfoPort == 0, conf,
+ new AccessControlList(conf.get(DFS_ADMIN, " "))) {
+ {
+ if (UserGroupInformation.isSecurityEnabled()) {
+ Map<String, String> params = new HashMap<String, String>();
+ String principalInConf = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPENGO_USER_NAME_KEY);
+ if (principalInConf != null && !principalInConf.isEmpty()) {
+ params.put("kerberos.principal",
+ SecurityUtil.getServerPrincipal(principalInConf, infoSocAddr.getHostName()));
}
-
- infoServer.setAttribute("secondary.name.node", SecondaryNameNode.this);
- infoServer.setAttribute("name.system.image", checkpointImage);
- infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
- infoServer.addInternalServlet("getimage", "/getimage",
- GetImageServlet.class, true);
- infoServer.start();
- return infoServer;
+ String httpKeytab = conf.get(DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY);
+ if (httpKeytab != null && !httpKeytab.isEmpty()) {
+ params.put("kerberos.keytab", httpKeytab);
+ }
+ params.put(AuthenticationFilter.AUTH_TYPE, "kerberos");
+
+ defineFilter(webAppContext, SPNEGO_FILTER, AuthenticationFilter.class.getName(),
+ params, null);
}
- });
- } catch (InterruptedException e) {
- throw new RuntimeException(e);
- }
-
+ }
+ };
+ infoServer.setAttribute("secondary.name.node", this);
+ infoServer.setAttribute("name.system.image", checkpointImage);
+ infoServer.setAttribute(JspHelper.CURRENT_CONF, conf);
+ infoServer.addInternalServlet("getimage", "/getimage",
+ GetImageServlet.class, true);
+ infoServer.start();
+
LOG.info("Web server init done");
// The web-server port can be ephemeral... ensure we have the correct info
infoPort = infoServer.getPort();
- if (!UserGroupInformation.isSecurityEnabled()) {
- imagePort = infoPort;
- }
-
- conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" +infoPort);
- LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" +infoPort);
- LOG.info("Secondary image servlet up at: " + infoBindAddress + ":" + imagePort);
+
+ conf.set(DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, infoBindAddress + ":" + infoPort);
+ LOG.info("Secondary Web-server up at: " + infoBindAddress + ":" + infoPort);
LOG.info("Checkpoint Period :" + checkpointConf.getPeriod() + " secs " +
- "(" + checkpointConf.getPeriod()/60 + " min)");
+ "(" + checkpointConf.getPeriod() / 60 + " min)");
LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns");
}
@@ -434,7 +420,7 @@ private String getInfoServer() throws IOException {
throw new IOException("This is not a DFS");
}
- String configuredAddress = DFSUtil.getInfoServer(null, conf, true);
+ String configuredAddress = DFSUtil.getInfoServer(null, conf, false);
String address = DFSUtil.substituteForWildcardAddress(configuredAddress,
fsName.getHost());
LOG.debug("Will connect to NameNode at HTTP address: " + address);
@@ -446,7 +432,7 @@ private String getInfoServer() throws IOException {
* for image transfers
*/
private InetSocketAddress getImageListenAddress() {
- return new InetSocketAddress(infoBindAddress, imagePort);
+ return new InetSocketAddress(infoBindAddress, infoPort);
}
/**
@@ -507,7 +493,7 @@ boolean doCheckpoint() throws IOException {
/**
- * @param argv The parameters passed to this program.
+ * @param opts The parameters passed to this program.
* @exception Exception if the filesystem does not exist.
* @return 0 on success, non zero on error.
*/
@@ -709,7 +695,7 @@ static class CheckpointStorage extends FSImage {
* Construct a checkpoint image.
* @param conf Node configuration.
* @param imageDirs URIs of storage for image.
- * @param editDirs URIs of storage for edit logs.
+ * @param editsDirs URIs of storage for edit logs.
* @throws IOException If storage cannot be accessed.
*/
CheckpointStorage(Configuration conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
index b99720aff5a..97088c5f433 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java
@@ -201,19 +201,17 @@ static MD5Hash getFileClient(String nnHostPort,
String queryString, List<File> localPaths,
NNStorage dstStorage, boolean getChecksum) throws IOException {
byte[] buf = new byte[HdfsConstants.IO_FILE_BUFFER_SIZE];
- String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
- StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
- str.append(queryString);
+ String str = "http://" + nnHostPort + "/getimage?" + queryString;
+ LOG.info("Opening connection to " + str);
//
// open connection to remote server
//
- URL url = new URL(str.toString());
-
- // Avoid Krb bug with cross-realm hosts
- SecurityUtil.fetchServiceTicket(url);
- HttpURLConnection connection = (HttpURLConnection) url.openConnection();
-
+ URL url = new URL(str);
+
+ HttpURLConnection connection = (HttpURLConnection)
+ SecurityUtil.openSecureHttpConnection(url);
+
if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
throw new HttpGetFailedException(
"Image transfer servlet at " + url +
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
index b9601fa7eb9..e76de8bbbe7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
@@ -97,7 +97,6 @@ public class BootstrapStandby implements Tool, Configurable {
static final int ERR_CODE_LOGS_UNAVAILABLE = 6;
public int run(String[] args) throws Exception {
- SecurityUtil.initKrb5CipherSuites();
parseArgs(args);
parseConfAndFindOtherNN();
NameNode.checkAllowFormat(conf);
@@ -325,7 +324,7 @@ private void parseConfAndFindOtherNN() throws IOException {
"Could not determine valid IPC address for other NameNode (%s)" +
", got: %s", otherNNId, otherIpcAddr);
- otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, true);
+ otherHttpAddr = DFSUtil.getInfoServer(null, otherNode, false);
otherHttpAddr = DFSUtil.substituteForWildcardAddress(otherHttpAddr,
otherIpcAddr.getHostName());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
index 036dd431ade..bbec10c3084 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
@@ -92,7 +92,7 @@ private void setNameNodeAddresses(Configuration conf) {
}
private String getHttpAddress(Configuration conf) {
- String configuredAddr = DFSUtil.getInfoServer(null, conf, true);
+ String configuredAddr = DFSUtil.getInfoServer(null, conf, false);
// Use the hostname from the RPC address as a default, in case
// the HTTP address is configured to 0.0.0.0.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java
index f871828f840..60e41a615e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RemoteEditLogManifest.java
@@ -17,22 +17,16 @@
*/
package org.apache.hadoop.hdfs.server.protocol;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
import java.util.Collections;
import java.util.List;
-import org.apache.hadoop.io.Writable;
-
import com.google.common.base.Joiner;
import com.google.common.base.Preconditions;
-import com.google.common.collect.Lists;
/**
* An enumeration of logs available on a remote NameNode.
*/
-public class RemoteEditLogManifest implements Writable {
+public class RemoteEditLogManifest {
private List<RemoteEditLog> logs;
@@ -75,25 +69,4 @@ public List<RemoteEditLog> getLogs() {
public String toString() {
return "[" + Joiner.on(", ").join(logs) + "]";
}
-
-
- @Override
- public void write(DataOutput out) throws IOException {
- out.writeInt(logs.size());
- for (RemoteEditLog log : logs) {
- log.write(out);
- }
- }
-
- @Override
- public void readFields(DataInput in) throws IOException {
- int numLogs = in.readInt();
- logs = Lists.newArrayList();
- for (int i = 0; i < numLogs; i++) {
- RemoteEditLog log = new RemoteEditLog();
- log.readFields(in);
- logs.add(log);
- }
- checkState();
- }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index 9c49654c471..c7c206fcb73 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -26,6 +26,8 @@
import java.util.List;
import java.util.TreeSet;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -64,9 +66,11 @@
@InterfaceAudience.Private
public class DFSAdmin extends FsShell {
- static{
+ static {
HdfsConfiguration.init();
}
+
+ private static final Log LOG = LogFactory.getLog(DFSAdmin.class);
/**
* An abstract class for the execution of a file system command
@@ -504,7 +508,7 @@ public int setBalancerBandwidth(String[] argv, int idx) throws IOException {
*/
public int fetchImage(String[] argv, int idx) throws IOException {
String infoServer = DFSUtil.getInfoServer(
- HAUtil.getAddressOfActive(getDFS()), getConf(), true);
+ HAUtil.getAddressOfActive(getDFS()), getConf(), false);
TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
new File(argv[idx]));
return 0;
@@ -1089,6 +1093,7 @@ public int run(String[] argv) throws Exception {
return exitCode;
}
+ Exception debugException = null;
exitCode = 0;
try {
if ("-report".equals(cmd)) {
@@ -1143,6 +1148,7 @@ public int run(String[] argv) throws Exception {
printUsage("");
}
} catch (IllegalArgumentException arge) {
+ debugException = arge;
exitCode = -1;
System.err.println(cmd.substring(1) + ": " + arge.getLocalizedMessage());
printUsage(cmd);
@@ -1151,6 +1157,7 @@ public int run(String[] argv) throws Exception {
// This is an error returned by hadoop server. Print
// out the first line of the error message, ignore the stack trace.
exitCode = -1;
+ debugException = e;
try {
String[] content;
content = e.getLocalizedMessage().split("\n");
@@ -1159,12 +1166,17 @@ public int run(String[] argv) throws Exception {
} catch (Exception ex) {
System.err.println(cmd.substring(1) + ": "
+ ex.getLocalizedMessage());
+ debugException = ex;
}
} catch (Exception e) {
exitCode = -1;
+ debugException = e;
System.err.println(cmd.substring(1) + ": "
+ e.getLocalizedMessage());
- }
+ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Exception encountered:", debugException);
+ }
return exitCode;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
index 7d78bced360..34c72e9700d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
@@ -153,8 +153,7 @@ private Integer listCorruptFileBlocks(String dir, String baseUrl)
url.append("&startblockafter=").append(String.valueOf(cookie));
}
URL path = new URL(url.toString());
- SecurityUtil.fetchServiceTicket(path);
- URLConnection connection = path.openConnection();
+ URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
InputStream stream = connection.getInputStream();
BufferedReader input = new BufferedReader(new InputStreamReader(
stream, "UTF-8"));
@@ -222,16 +221,11 @@ private String getCurrentNamenodeAddress() throws IOException {
return null;
}
- return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, true);
+ return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf, false);
}
private int doWork(final String[] args) throws IOException {
- String proto = "http://";
- if (UserGroupInformation.isSecurityEnabled()) {
- SecurityUtil.initKrb5CipherSuites();
- proto = "https://";
- }
- final StringBuilder url = new StringBuilder(proto);
+ final StringBuilder url = new StringBuilder("http://");
String namenodeAddress = getCurrentNamenodeAddress();
if (namenodeAddress == null) {
@@ -279,8 +273,7 @@ else if (args[idx].equals("-list-corruptfileblocks")) {
return listCorruptFileBlocks(dir, url.toString());
}
URL path = new URL(url.toString());
- SecurityUtil.fetchServiceTicket(path);
- URLConnection connection = path.openConnection();
+ URLConnection connection = SecurityUtil.openSecureHttpConnection(path);
InputStream stream = connection.getInputStream();
BufferedReader input = new BufferedReader(new InputStreamReader(
stream, "UTF-8"));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index 3e652c13a7c..63aa6b2064f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -72,11 +72,6 @@ public class DelegationTokenFetcher {
private static final String RENEW = "renew";
private static final String PRINT = "print";
- static {
- // Enable Kerberos sockets
- System.setProperty("https.cipherSuites", "TLS_KRB5_WITH_3DES_EDE_CBC_SHA");
- }
-
private static void printUsage(PrintStream err) throws IOException {
err.println("fetchdt retrieves delegation tokens from the NameNode");
err.println();
@@ -106,7 +101,7 @@ public static void main(final String[] args) throws Exception {
final Configuration conf = new HdfsConfiguration();
Options fetcherOptions = new Options();
fetcherOptions.addOption(WEBSERVICE, true,
- "HTTPS url to reach the NameNode at");
+ "HTTP url to reach the NameNode at");
fetcherOptions.addOption(RENEWER, true,
"Name of the delegation token renewer");
fetcherOptions.addOption(CANCEL, false, "cancel the token");
@@ -224,8 +219,7 @@ static public Credentials getDTfromRemote(String nnAddr,
}
URL remoteURL = new URL(url.toString());
- SecurityUtil.fetchServiceTicket(remoteURL);
- URLConnection connection = URLUtils.openConnection(remoteURL);
+ URLConnection connection = SecurityUtil.openSecureHttpConnection(remoteURL);
InputStream in = connection.getInputStream();
Credentials ts = new Credentials();
dis = new DataInputStream(in);
@@ -264,7 +258,7 @@ static public long renewDelegationToken(String nnAddr,
try {
URL url = new URL(buf.toString());
- SecurityUtil.fetchServiceTicket(url);
+ connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
connection = (HttpURLConnection)URLUtils.openConnection(url);
if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
throw new IOException("Error renewing token: " +
@@ -358,8 +352,7 @@ static public void cancelDelegationToken(String nnAddr,
HttpURLConnection connection=null;
try {
URL url = new URL(buf.toString());
- SecurityUtil.fetchServiceTicket(url);
- connection = (HttpURLConnection)URLUtils.openConnection(url);
+ connection = (HttpURLConnection) SecurityUtil.openSecureHttpConnection(url);
if (connection.getResponseCode() != HttpURLConnection.HTTP_OK) {
throw new IOException("Error cancelling token: " +
connection.getResponseMessage());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 9218078a482..912f362728c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -40,7 +40,6 @@
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 00000000000..10b874b6855
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,2 @@
+org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier
+org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index 4ac0dd20570..209ed2e909a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -868,4 +868,15 @@
+<property>
+  <name>dfs.namenode.kerberos.internal.spnego.principal</name>
+  <value>${dfs.web.authentication.kerberos.principal}</value>
+</property>
+
+<property>
+  <name>dfs.secondary.namenode.kerberos.internal.spnego.principal</name>
+  <value>${dfs.web.authentication.kerberos.principal}</value>
+</property>
+
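Editor's note: because Configuration values support ${...} variable expansion, the two new internal SPNEGO principals fall back to the web authentication principal unless explicitly overridden. A minimal sketch of the effective behaviour; the principal value is an example, not a shipped default.

  Configuration conf = new HdfsConfiguration();
  conf.set("dfs.web.authentication.kerberos.principal", "HTTP/_HOST@EXAMPLE.COM");
  // With no explicit override, variable expansion yields the same principal
  // for the NameNode's internal SPNEGO filter.
  String nnSpnego = conf.get("dfs.namenode.kerberos.internal.spnego.principal");
  // nnSpnego == "HTTP/_HOST@EXAMPLE.COM"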
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
new file mode 100644
index 00000000000..9cc74e32705
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+/**
+ * Make sure that ViewFileSystem works when the root of an FS is mounted to a
+ * ViewFileSystem mount point.
+ */
+public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
+
+ private static MiniDFSCluster cluster;
+ private static Configuration CONF = new Configuration();
+ private static FileSystem fHdfs;
+
+ @BeforeClass
+ public static void clusterSetupAtBegining() throws IOException,
+ LoginException, URISyntaxException {
+ SupportsBlocks = true;
+ CONF.setBoolean(
+ DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+ cluster = new MiniDFSCluster.Builder(CONF)
+ .numDataNodes(2)
+ .build();
+ cluster.waitClusterUp();
+
+ fHdfs = cluster.getFileSystem();
+ }
+
+ @AfterClass
+ public static void clusterShutdownAtEnd() throws Exception {
+ cluster.shutdown();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ fsTarget = fHdfs;
+ super.setUp();
+ }
+
+ /**
+ * Override this so that we don't set the targetTestRoot to any path under the
+ * root of the FS, and so that we don't try to delete the test dir, but rather
+ * only its contents.
+ */
+ @Override
+ void initializeTargetTestRoot() throws IOException {
+ targetTestRoot = fHdfs.makeQualified(new Path("/"));
+ for (FileStatus status : fHdfs.listStatus(targetTestRoot)) {
+ fHdfs.delete(status.getPath(), true);
+ }
+ }
+
+ @Override
+ int getExpectedDelegationTokenCount() {
+ return 8;
+ }
+
+ @Override
+ int getExpectedDelegationTokenCountWithCredentials() {
+ return 1;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
index 7ad56c0e93c..9f71d85f051 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
@@ -105,17 +105,17 @@ void setupMountPoints() {
// additional mount.
@Override
int getExpectedDirPaths() {
- return 7;
+ return 8;
}
@Override
int getExpectedMountPoints() {
- return 8;
+ return 9;
}
@Override
int getExpectedDelegationTokenCount() {
- return 8;
+ return 9;
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
new file mode 100644
index 00000000000..449689242d4
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs.viewfs;
+
+import java.io.IOException;
+import java.net.URISyntaxException;
+
+import javax.security.auth.login.LoginException;
+
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RemoteIterator;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.AfterClass;
+import org.junit.Before;
+import org.junit.BeforeClass;
+
+/**
+ * Make sure that ViewFs works when the root of an FS is mounted to a ViewFs
+ * mount point.
+ */
+public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
+
+ private static MiniDFSCluster cluster;
+ private static HdfsConfiguration CONF = new HdfsConfiguration();
+ private static FileContext fc;
+
+ @BeforeClass
+ public static void clusterSetupAtBegining() throws IOException,
+ LoginException, URISyntaxException {
+ SupportsBlocks = true;
+ CONF.setBoolean(
+ DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
+
+ cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
+ cluster.waitClusterUp();
+ fc = FileContext.getFileContext(cluster.getURI(0), CONF);
+ }
+
+
+ @AfterClass
+ public static void ClusterShutdownAtEnd() throws Exception {
+ cluster.shutdown();
+ }
+
+ @Before
+ public void setUp() throws Exception {
+ // create the test root on local_fs
+ fcTarget = fc;
+ super.setUp();
+ }
+
+ /**
+ * Override this so that we don't set the targetTestRoot to any path under the
+ * root of the FS, and so that we don't try to delete the test dir, but rather
+ * only its contents.
+ */
+ @Override
+ void initializeTargetTestRoot() throws IOException {
+ targetTestRoot = fc.makeQualified(new Path("/"));
+ RemoteIterator<FileStatus> dirContents = fc.listStatus(targetTestRoot);
+ while (dirContents.hasNext()) {
+ fc.delete(dirContents.next().getPath(), true);
+ }
+ }
+
+ /**
+ * This overrides the default implementation since hdfs does have delegation
+ * tokens.
+ */
+ @Override
+ int getExpectedDelegationTokenCount() {
+ return 8;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
index 0e94b4eb3d2..dc7110cfafe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsHdfs.java
@@ -20,7 +20,6 @@
import java.io.IOException;
import java.net.URISyntaxException;
-import java.util.List;
import javax.security.auth.login.LoginException;
@@ -30,20 +29,13 @@
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.Token;
-
-import org.junit.After;
import org.junit.AfterClass;
-import org.junit.Assert;
import org.junit.Before;
import org.junit.BeforeClass;
-import org.junit.Test;
-
public class TestViewFsHdfs extends ViewFsBaseTest {
private static MiniDFSCluster cluster;
- private static Path defaultWorkingDirectory;
private static HdfsConfiguration CONF = new HdfsConfiguration();
private static FileContext fc;
@@ -57,7 +49,7 @@ public static void clusterSetupAtBegining() throws IOException,
cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
cluster.waitClusterUp();
fc = FileContext.getFileContext(cluster.getURI(0), CONF);
- defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
+ Path defaultWorkingDirectory = fc.makeQualified( new Path("/user/" +
UserGroupInformation.getCurrentUser().getShortUserName()));
fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
}
@@ -73,25 +65,15 @@ public void setUp() throws Exception {
// create the test root on local_fs
fcTarget = fc;
super.setUp();
-
- }
-
- @After
- public void tearDown() throws Exception {
- super.tearDown();
}
-
- /*
- * This overides the default implementation since hdfs does have delegation
+ /**
+ * This overrides the default implementation since hdfs does have delegation
* tokens.
*/
@Override
- @Test
- public void testGetDelegationTokens() throws IOException {
- List<Token<?>> delTokens =
- fcView.getDelegationTokens(new Path("/"), "sanjay");
- Assert.assertEquals(7, delTokens.size());
+ int getExpectedDelegationTokenCount() {
+ return 8;
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
index 7c8a02e513e..5b50cefe102 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
@@ -59,6 +59,7 @@
import org.apache.hadoop.hdfs.client.HdfsDataInputStream;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -706,13 +707,59 @@ public static void setFederatedConfiguration(MiniDFSCluster cluster,
.join(nameservices));
}
+ private static DatanodeID getDatanodeID(String ipAddr) {
+ return new DatanodeID(ipAddr, "localhost",
+ DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+ }
+
+ public static DatanodeID getLocalDatanodeID() {
+ return new DatanodeID("127.0.0.1", "localhost",
+ DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+ }
+
+ public static DatanodeID getLocalDatanodeID(int port) {
+ return new DatanodeID("127.0.0.1", "localhost", "",
+ port, port, port);
+ }
+
public static DatanodeDescriptor getLocalDatanodeDescriptor() {
- return new DatanodeDescriptor(
- new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+ return new DatanodeDescriptor(getLocalDatanodeID());
}
public static DatanodeInfo getLocalDatanodeInfo() {
- return new DatanodeInfo(
- new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+ return new DatanodeInfo(getLocalDatanodeID());
+ }
+
+ public static DatanodeInfo getDatanodeInfo(String ipAddr) {
+ return new DatanodeInfo(getDatanodeID(ipAddr));
+ }
+
+ public static DatanodeInfo getLocalDatanodeInfo(int port) {
+ return new DatanodeInfo(getLocalDatanodeID(port));
+ }
+
+ public static DatanodeInfo getDatanodeInfo(String ipAddr,
+ String host, int port) {
+ return new DatanodeInfo(new DatanodeID(ipAddr, host, port));
+ }
+
+ public static DatanodeInfo getLocalDatanodeInfo(String ipAddr,
+ String hostname, AdminStates adminState) {
+ return new DatanodeInfo(ipAddr, hostname, "storage",
+ DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+ DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
+ DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT,
+ 1, 2, 3, 4, 5, 6, "local", adminState);
+ }
+
+ public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+ String rackLocation) {
+ return getDatanodeDescriptor(ipAddr, DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
+ rackLocation);
+ }
+
+ public static DatanodeDescriptor getDatanodeDescriptor(String ipAddr,
+ int port, String rackLocation) {
+ return new DatanodeDescriptor(new DatanodeID(ipAddr, port), rackLocation);
}
}
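Editor's note: the DFSTestUtil factory methods added above are what the test changes further down switch to, in place of calling the DatanodeID/DatanodeInfo constructors directly. A brief usage sketch; addresses and ports are arbitrary examples.

  DatanodeID localId = DFSTestUtil.getLocalDatanodeID(50010);
  DatanodeInfo bogusDn = DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234);
  DatanodeDescriptor rackANode =
      DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA");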
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
index 335734d5b63..94f1dedee95 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestConnCache.java
@@ -24,6 +24,7 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSClient;
@@ -34,6 +35,7 @@
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.token.Token;
import org.junit.Test;
@@ -230,6 +232,33 @@ public void testReadFromOneDN() throws IOException {
in.close();
}
+
+ /**
+ * Test that the socket cache can be disabled by setting the capacity to
+ * 0. Regression test for HDFS-3365.
+ */
+ @Test
+ public void testDisableCache() throws IOException {
+ LOG.info("Starting testDisableCache()");
+
+ // Reading with the normally configured filesystem should
+ // cache a socket.
+ DFSTestUtil.readFile(fs, testFile);
+ assertEquals(1, ((DistributedFileSystem)fs).dfs.socketCache.size());
+
+ // Configure a new instance with no caching, ensure that it doesn't
+ // cache anything
+ Configuration confWithoutCache = new Configuration(fs.getConf());
+ confWithoutCache.setInt(
+ DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 0);
+ FileSystem fsWithoutCache = FileSystem.newInstance(confWithoutCache);
+ try {
+ DFSTestUtil.readFile(fsWithoutCache, testFile);
+ assertEquals(0, ((DistributedFileSystem)fsWithoutCache).dfs.socketCache.size());
+ } finally {
+ fsWithoutCache.close();
+ }
+ }
@AfterClass
public static void teardownCluster() throws Exception {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 86bef8e1ee7..ad0f74e83ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -333,7 +333,7 @@ private LocatedBlocks makeBadBlockList(LocatedBlocks goodBlockList) {
LocatedBlock badLocatedBlock = new LocatedBlock(
goodLocatedBlock.getBlock(),
new DatanodeInfo[] {
- new DatanodeInfo(new DatanodeID("255.255.255.255", 234))
+ DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234)
},
goodLocatedBlock.getStartOffset(),
false);
@@ -627,8 +627,7 @@ public void testClientDNProtocolTimeout() throws IOException {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
- DatanodeID fakeDnId = new DatanodeID(
- "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+ DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
index 6c1f8c4deb4..38aa1bf8578 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java
@@ -319,6 +319,25 @@ public void testConfModificationFederationAndHa() {
}
}
+ /**
+ * Ensure that fs.defaultFS is set in the configuration even if neither HA nor
+ * Federation is enabled.
+ *
+ * Regression test for HDFS-3351.
+ */
+ @Test
+ public void testConfModificationNoFederationOrHa() {
+ final HdfsConfiguration conf = new HdfsConfiguration();
+ String nsId = null;
+ String nnId = null;
+
+ conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1234");
+
+ assertFalse("hdfs://localhost:1234".equals(conf.get(FS_DEFAULT_NAME_KEY)));
+ NameNode.initializeGenericKeys(conf, nsId, nnId);
+ assertEquals("hdfs://localhost:1234", conf.get(FS_DEFAULT_NAME_KEY));
+ }
+
/**
* Regression test for HDFS-2934.
*/
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
new file mode 100644
index 00000000000..1ef4eac997e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -0,0 +1,199 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY;
+import static org.junit.Assert.*;
+
+import java.io.InputStream;
+import java.io.PrintWriter;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.util.ReflectionUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import com.google.common.io.NullOutputStream;
+
+public class TestDataTransferKeepalive {
+ Configuration conf = new HdfsConfiguration();
+ private MiniDFSCluster cluster;
+ private FileSystem fs;
+ private InetSocketAddress dnAddr;
+ private DataNode dn;
+ private DFSClient dfsClient;
+ private static Path TEST_FILE = new Path("/test");
+
+ private static final int KEEPALIVE_TIMEOUT = 1000;
+ private static final int WRITE_TIMEOUT = 3000;
+
+ @Before
+ public void setup() throws Exception {
+ conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
+ KEEPALIVE_TIMEOUT);
+ conf.setInt(DFS_CLIENT_MAX_BLOCK_ACQUIRE_FAILURES_KEY,
+ 0);
+
+ cluster = new MiniDFSCluster.Builder(conf)
+ .numDataNodes(1).build();
+ fs = cluster.getFileSystem();
+ dfsClient = ((DistributedFileSystem)fs).dfs;
+
+ String poolId = cluster.getNamesystem().getBlockPoolId();
+ dn = cluster.getDataNodes().get(0);
+ DatanodeRegistration dnReg = DataNodeTestUtils.getDNRegistrationForBP(
+ dn, poolId);
+ dnAddr = NetUtils.createSocketAddr(dnReg.getXferAddr());
+ }
+
+ @After
+ public void teardown() {
+ cluster.shutdown();
+ }
+
+ /**
+ * Regression test for HDFS-3357. Check that the datanode is respecting
+ * its configured keepalive timeout.
+ */
+ @Test(timeout=30000)
+ public void testKeepaliveTimeouts() throws Exception {
+ DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
+
+ // Clients that write aren't currently re-used.
+ assertEquals(0, dfsClient.socketCache.size());
+ assertXceiverCount(0);
+
+ // Reads the file, so we should get a
+ // cached socket, and should have an xceiver on the other side.
+ DFSTestUtil.readFile(fs, TEST_FILE);
+ assertEquals(1, dfsClient.socketCache.size());
+ assertXceiverCount(1);
+
+ // Sleep for a bit longer than the keepalive timeout
+ // and make sure the xceiver died.
+ Thread.sleep(KEEPALIVE_TIMEOUT * 2);
+ assertXceiverCount(0);
+
+ // The socket is still in the cache, because we don't
+ // notice that it's closed until we try to read
+ // from it again.
+ assertEquals(1, dfsClient.socketCache.size());
+
+ // Take it out of the cache - reading should
+ // give an EOF.
+ Socket s = dfsClient.socketCache.get(dnAddr);
+ assertNotNull(s);
+ assertEquals(-1, NetUtils.getInputStream(s).read());
+ }
+
+ /**
+ * Test for the case where the client begins to read a long block, but doesn't
+ * read bytes off the stream quickly. The datanode should time out sending the
+ * chunks and the transceiver should die, even if it has a long keepalive.
+ */
+ @Test(timeout=30000)
+ public void testSlowReader() throws Exception {
+ // Restart the DN with a shorter write timeout.
+ DataNodeProperties props = cluster.stopDataNode(0);
+ props.conf.setInt(DFS_DATANODE_SOCKET_WRITE_TIMEOUT_KEY,
+ WRITE_TIMEOUT);
+ props.conf.setInt(DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY,
+ 120000);
+ assertTrue(cluster.restartDataNode(props, true));
+ // Wait for heartbeats to avoid a startup race where we
+ // try to write the block while the DN is still starting.
+ cluster.triggerHeartbeats();
+
+ dn = cluster.getDataNodes().get(0);
+
+ DFSTestUtil.createFile(fs, TEST_FILE, 1024*1024*8L, (short)1, 0L);
+ FSDataInputStream stm = fs.open(TEST_FILE);
+ try {
+ stm.read();
+ assertXceiverCount(1);
+
+ Thread.sleep(WRITE_TIMEOUT + 1000);
+ // DN should time out in sendChunks, and this should force
+ // the xceiver to exit.
+ assertXceiverCount(0);
+ } finally {
+ IOUtils.closeStream(stm);
+ }
+ }
+
+ @Test(timeout=30000)
+ public void testManyClosedSocketsInCache() throws Exception {
+ // Make a small file
+ DFSTestUtil.createFile(fs, TEST_FILE, 1L, (short)1, 0L);
+
+ // Insert a bunch of dead sockets in the cache, by opening
+ // many streams concurrently, reading all of the data,
+ // and then closing them.
+ InputStream[] stms = new InputStream[5];
+ try {
+ for (int i = 0; i < stms.length; i++) {
+ stms[i] = fs.open(TEST_FILE);
+ }
+ for (InputStream stm : stms) {
+ IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+ }
+ } finally {
+ IOUtils.cleanup(null, stms);
+ }
+
+ DFSClient client = ((DistributedFileSystem)fs).dfs;
+ assertEquals(5, client.socketCache.size());
+
+ // Let all the xceivers timeout
+ Thread.sleep(1500);
+ assertXceiverCount(0);
+
+ // Client side still has the sockets cached
+ assertEquals(5, client.socketCache.size());
+
+ // Reading should not throw an exception.
+ DFSTestUtil.readFile(fs, TEST_FILE);
+ }
+
+ private void assertXceiverCount(int expected) {
+ // Subtract 1, since the DataXceiverServer
+ // counts as one
+ int count = dn.getXceiverCount() - 1;
+ if (count != expected) {
+ ReflectionUtils.printThreadInfo(
+ new PrintWriter(System.err),
+ "Thread dumps");
+ fail("Expected " + expected + " xceivers, found " +
+ count);
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
index 4055cd8d3d6..a46a56b92df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
@@ -85,6 +85,7 @@ public void testFileSystemCloseAll() throws Exception {
/**
* Tests DFSClient.close throws no ConcurrentModificationException if
* multiple files are open.
+ * Also tests that any cached sockets are closed. (HDFS-3359)
*/
@Test
public void testDFSClose() throws Exception {
@@ -94,11 +95,23 @@ public void testDFSClose() throws Exception {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
FileSystem fileSys = cluster.getFileSystem();
- // create two files
+ // create two files, leaving them open
fileSys.create(new Path("/test/dfsclose/file-0"));
fileSys.create(new Path("/test/dfsclose/file-1"));
+
+ // create another file, close it, and read it, so
+ // the client gets a socket in its SocketCache
+ Path p = new Path("/non-empty-file");
+ DFSTestUtil.createFile(fileSys, p, 1L, (short)1, 0L);
+ DFSTestUtil.readFile(fileSys, p);
+
+ DFSClient client = ((DistributedFileSystem)fileSys).dfs;
+ SocketCache cache = client.socketCache;
+ assertEquals(1, cache.size());
fileSys.close();
+
+ assertEquals(0, cache.size());
} finally {
if (cluster != null) {cluster.shutdown();}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
index 20f28376a8e..54ff9036b91 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
@@ -31,12 +31,15 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.HardLink;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.apache.hadoop.ipc.RemoteException;
+import org.junit.Assert;
import org.junit.Test;
/**
@@ -295,4 +298,43 @@ public void testFileNotFound() throws IOException {
cluster.shutdown();
}
}
+
+ /** Test two consecutive appends on a file with a full block. */
+ @Test
+ public void testAppendTwice() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+ final FileSystem fs1 = cluster.getFileSystem();
+ final FileSystem fs2 = AppendTestUtil.createHdfsWithDifferentUsername(conf);
+ try {
+
+ final Path p = new Path("/testAppendTwice/foo");
+ final int len = 1 << 16;
+ final byte[] fileContents = AppendTestUtil.initBuffer(len);
+
+ {
+ // create a new file with a full block.
+ FSDataOutputStream out = fs2.create(p, true, 4096, (short)1, len);
+ out.write(fileContents, 0, len);
+ out.close();
+ }
+
+ //1st append does not add any data so that the last block remains full
+ //and the last block in INodeFileUnderConstruction is a BlockInfo
+ //but not BlockInfoUnderConstruction.
+ fs2.append(p);
+
+ //2nd append should get AlreadyBeingCreatedException
+ fs1.append(p);
+ Assert.fail();
+ } catch(RemoteException re) {
+ AppendTestUtil.LOG.info("Got an exception:", re);
+ Assert.assertEquals(AlreadyBeingCreatedException.class.getName(),
+ re.getClassName());
+ } finally {
+ fs2.close();
+ fs1.close();
+ cluster.shutdown();
+ }
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
index 72c27bc9433..7370f72126c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestGetBlocks.java
@@ -121,8 +121,7 @@ public void testGetBlocks() throws Exception {
getBlocksWithException(namenode, dataNodes[0], -1);
// get blocks of size BlockSize from a non-existent datanode
- DatanodeInfo info = DFSTestUtil.getLocalDatanodeInfo();
- info.setIpAddr("1.2.3.4");
+ DatanodeInfo info = DFSTestUtil.getDatanodeInfo("1.2.3.4");
getBlocksWithException(namenode, info, 2);
} finally {
cluster.shutdown();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
index 9841dc8700e..9a7504a0508 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
@@ -62,7 +62,7 @@ public void testDefaultPolicy() throws Exception {
final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
datanodes[0] = new DatanodeInfo[0];
for(int i = 0; i < infos.length; ) {
- infos[i] = new DatanodeInfo(new DatanodeID("dn" + i, 100));
+ infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
i++;
datanodes[i] = new DatanodeInfo[i];
System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
index a6280d319aa..217960bca8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
@@ -131,7 +131,7 @@ public void testConvertNamenodeRegistration() {
@Test
public void testConvertDatanodeID() {
- DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3);
+ DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
DatanodeIDProto dnProto = PBHelper.convert(dn);
DatanodeID dn2 = PBHelper.convert(dnProto);
compare(dn, dn2);
@@ -280,10 +280,6 @@ public ExtendedBlock getExtendedBlock(long blkid) {
return new ExtendedBlock("bpid", blkid, 100, 2);
}
- private DatanodeInfo getDNInfo() {
- return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2));
- }
-
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
assertEquals(dn1.getAdminState(), dn2.getAdminState());
assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
@@ -316,7 +312,9 @@ public void testConvertExtendedBlock() {
@Test
public void testConvertRecoveringBlock() {
- DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+ DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
+ DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
+ DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
RecoveringBlockProto bProto = PBHelper.convert(b);
RecoveringBlock b1 = PBHelper.convert(bProto);
@@ -330,7 +328,9 @@ public void testConvertRecoveringBlock() {
@Test
public void testConvertBlockRecoveryCommand() {
- DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+ DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
+ DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
+ DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
List<RecoveringBlock> blks = ImmutableList.of(
new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
@@ -400,13 +400,11 @@ private void compare(Token<BlockTokenIdentifier> expected,
@Test
public void testConvertLocatedBlock() {
- DatanodeInfo [] dnInfos = new DatanodeInfo[3];
- dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002, 20000, 10001, 9999,
- 59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
- dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
- 59, 69, 32, "local", AdminStates.DECOMMISSIONED);
- dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
- 59, 69, 32, "local", AdminStates.NORMAL);
+ DatanodeInfo [] dnInfos = {
+ DFSTestUtil.getLocalDatanodeInfo("1.1.1.1", "h1", AdminStates.DECOMMISSION_INPROGRESS),
+ DFSTestUtil.getLocalDatanodeInfo("2.2.2.2", "h2", AdminStates.DECOMMISSIONED),
+ DFSTestUtil.getLocalDatanodeInfo("3.3.3.3", "h3", AdminStates.NORMAL)
+ };
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
LocatedBlockProto lbProto = PBHelper.convert(lb);
@@ -424,7 +422,7 @@ public void testConvertLocatedBlock() {
@Test
public void testConvertDatanodeRegistration() {
- DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
+ DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
getBlockKey(1), keys);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index ea335d26120..bf2c33815bf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -42,6 +42,7 @@
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -279,8 +280,7 @@ public void testBlockTokenRpcLeak() throws Exception {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
- DatanodeID fakeDnId = new DatanodeID("localhost",
- "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+ DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index a6e8c4f05b8..743fb3b08df 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -26,6 +26,7 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -47,17 +48,10 @@
import com.google.common.collect.Lists;
public class TestBlockManager {
- private final List<DatanodeDescriptor> nodes = ImmutableList.of(
- new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
- new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
- new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
- );
- private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
- private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
-
+ private List<DatanodeDescriptor> nodes;
+ private List<DatanodeDescriptor> rackA;
+ private List<DatanodeDescriptor> rackB;
+
/**
* Some of these tests exercise code which has some randomness involved -
* ie even if there's a bug, they may pass because the random node selection
@@ -82,6 +76,16 @@ public void setupMockCluster() throws IOException {
fsn = Mockito.mock(FSNamesystem.class);
Mockito.doReturn(true).when(fsn).hasWriteLock();
bm = new BlockManager(fsn, fsn, conf);
+ nodes = ImmutableList.of(
+ DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
+ DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
+ DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
+ DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackB"),
+ DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackB"),
+ DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackB")
+ );
+ rackA = nodes.subList(0, 3);
+ rackB = nodes.subList(3, 6);
}
private void addNodes(Iterable nodesToAdd) {
@@ -116,7 +120,7 @@ public void testBasicReplication() throws Exception {
}
private void doBasicTest(int testIndex) {
- List<DatanodeDescriptor> origNodes = nodes(0, 1);
+ List<DatanodeDescriptor> origNodes = getNodes(0, 1);
BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes);
DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
@@ -147,7 +151,7 @@ public void testTwoOfThreeNodesDecommissioned() throws Exception {
private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission two of the nodes (A1, A2)
@@ -157,7 +161,7 @@ private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws Exception
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origNodes.contains(pipeline[0]));
- assertEquals("Should have two targets", 3, pipeline.length);
+ assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackA = false;
for (int i = 1; i < pipeline.length; i++) {
@@ -190,7 +194,7 @@ public void testAllNodesHoldingReplicasDecommissioned() throws Exception {
private void doTestAllNodesHoldingReplicasDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes
@@ -242,7 +246,7 @@ public void testOneOfTwoRacksDecommissioned() throws Exception {
private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes in rack A
@@ -252,7 +256,7 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origNodes.contains(pipeline[0]));
- assertEquals("Should have 2 targets", 3, pipeline.length);
+ assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackB = false;
for (int i = 1; i < pipeline.length; i++) {
@@ -273,7 +277,8 @@ private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws Exception {
// the block is still under-replicated. Add a new node. This should allow
// the third off-rack replica.
- DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7", 100), "/rackC");
+ DatanodeDescriptor rackCNode =
+ DFSTestUtil.getDatanodeDescriptor("7.7.7.7", "/rackC");
addNodes(ImmutableList.of(rackCNode));
try {
DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -313,13 +318,13 @@ private void doTestSufficientlyReplBlocksUsesNewRack(int testIndex) {
@Test
public void testBlocksAreNotUnderreplicatedInSingleRack() throws Exception {
- List<DatanodeDescriptor> nodes = ImmutableList.of(
- new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackA")
+ List<DatanodeDescriptor> nodes = ImmutableList.of(
+ DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/rackA"),
+ DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/rackA"),
+ DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/rackA"),
+ DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/rackA"),
+ DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/rackA"),
+ DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/rackA")
);
addNodes(nodes);
List<DatanodeDescriptor> origNodes = nodes.subList(0, 3);;
@@ -359,7 +364,7 @@ private BlockInfo blockOnNodes(long blkId, List<DatanodeDescriptor> nodes) {
return blockInfo;
}
- private List<DatanodeDescriptor> nodes(int ... indexes) {
+ private List<DatanodeDescriptor> getNodes(int ... indexes) {
List<DatanodeDescriptor> ret = Lists.newArrayList();
for (int idx : indexes) {
ret.add(nodes.get(idx));
@@ -368,7 +373,7 @@ private List<DatanodeDescriptor> nodes(int ... indexes) {
}
private List<DatanodeDescriptor> startDecommission(int ... indexes) {
- List<DatanodeDescriptor> nodes = nodes(indexes);
+ List<DatanodeDescriptor> nodes = getNodes(indexes);
for (DatanodeDescriptor node : nodes) {
node.startDecommission();
}
@@ -380,7 +385,7 @@ private BlockInfo addBlockOnNodes(long blockId, List<DatanodeDescriptor> nodes)
Mockito.doReturn((short)3).when(iNode).getReplication();
BlockInfo blockInfo = blockOnNodes(blockId, nodes);
- bm.blocksMap.addINode(blockInfo, iNode);
+ bm.blocksMap.addBlockCollection(blockInfo, iNode);
return blockInfo;
}
@@ -404,8 +409,9 @@ private DatanodeDescriptor[] scheduleSingleReplication(Block block) {
LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls = getAllPendingReplications();
assertEquals(1, repls.size());
- Entry<DatanodeDescriptor, BlockTargetPair> repl = repls.entries()
- .iterator().next();
+ Entry<DatanodeDescriptor, BlockTargetPair> repl =
+ repls.entries().iterator().next();
+
DatanodeDescriptor[] targets = repl.getValue().targets;
DatanodeDescriptor[] pipeline = new DatanodeDescriptor[1 + targets.length];
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
index 08607093dbe..081438075c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
@@ -18,73 +18,75 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.junit.Before;
import org.junit.Test;
+import static org.junit.Assert.*;
+
public class TestHost2NodesMap {
private Host2NodesMap map = new Host2NodesMap();
- private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
- };
- private final DatanodeDescriptor NULL_NODE = null;
- private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040),
- "/d1/r4");
-
+ private DatanodeDescriptor dataNodes[];
+
@Before
public void setup() {
- for(DatanodeDescriptor node:dataNodes) {
+ dataNodes = new DatanodeDescriptor[] {
+ DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+ DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+ DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+ DFSTestUtil.getDatanodeDescriptor("3.3.3.3", 5021, "/d1/r2"),
+ };
+ for (DatanodeDescriptor node : dataNodes) {
map.add(node);
}
- map.add(NULL_NODE);
+ map.add(null);
}
@Test
public void testContains() throws Exception {
- for(int i=0; i
Queue<ReportedBlockInfo> q =
msgs.takeBlockQueue(block1Gs2DifferentInstance);
assertEquals(
- "ReportedBlockInfo [block=blk_1_1, dn=fake:100, reportedState=FINALIZED]," +
- "ReportedBlockInfo [block=blk_1_2, dn=fake:100, reportedState=FINALIZED]",
+ "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010, reportedState=FINALIZED]," +
+ "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010, reportedState=FINALIZED]",
Joiner.on(",").join(q));
assertEquals(0, msgs.count());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 49925ab885a..ce570f7eba2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -19,7 +19,7 @@
import static org.junit.Assert.*;
-import java.io.IOException;
+import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -39,54 +39,55 @@
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
+import org.junit.BeforeClass;
import org.junit.Test;
public class TestReplicationPolicy {
- private Random random= DFSUtil.getRandom();
+ private Random random = DFSUtil.getRandom();
private static final int BLOCK_SIZE = 1024;
private static final int NUM_OF_DATANODES = 6;
- private static final Configuration CONF = new HdfsConfiguration();
- private static final NetworkTopology cluster;
- private static final NameNode namenode;
- private static final BlockPlacementPolicy replicator;
+ private static NetworkTopology cluster;
+ private static NameNode namenode;
+ private static BlockPlacementPolicy replicator;
private static final String filename = "/dummyfile.txt";
- private static final DatanodeDescriptor dataNodes[] =
- new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"),
- new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3")
- };
-
- private final static DatanodeDescriptor NODE =
- new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
-
- static {
- try {
- FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
- CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
- DFSTestUtil.formatNameNode(CONF);
- namenode = new NameNode(CONF);
- } catch (IOException e) {
- e.printStackTrace();
- throw (RuntimeException)new RuntimeException().initCause(e);
- }
+ private static DatanodeDescriptor dataNodes[];
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ dataNodes = new DatanodeDescriptor[] {
+ DFSTestUtil.getDatanodeDescriptor("1.1.1.1", "/d1/r1"),
+ DFSTestUtil.getDatanodeDescriptor("2.2.2.2", "/d1/r1"),
+ DFSTestUtil.getDatanodeDescriptor("3.3.3.3", "/d1/r2"),
+ DFSTestUtil.getDatanodeDescriptor("4.4.4.4", "/d1/r2"),
+ DFSTestUtil.getDatanodeDescriptor("5.5.5.5", "/d2/r3"),
+ DFSTestUtil.getDatanodeDescriptor("6.6.6.6", "/d2/r3")
+ };
+
+ FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+ File baseDir = new File(System.getProperty(
+ "test.build.data", "build/test/data"), "dfs/");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ new File(baseDir, "name").getPath());
+
+ DFSTestUtil.formatNameNode(conf);
+ namenode = new NameNode(conf);
+
final BlockManager bm = namenode.getNamesystem().getBlockManager();
replicator = bm.getBlockPlacementPolicy();
cluster = bm.getDatanodeManager().getNetworkTopology();
// construct network topology
- for(int i=0; i
ArrayList<BlockRecord> syncList = new ArrayList<BlockRecord>(2);
BlockRecord record1 = new BlockRecord(
- new DatanodeID("xx", "yy", "zz", 1, 2, 3), dn1, replica1);
+ DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn1, replica1);
BlockRecord record2 = new BlockRecord(
- new DatanodeID("aa", "bb", "cc", 1, 2, 3), dn2, replica2);
+ DFSTestUtil.getDatanodeInfo("1.2.3.4", "bogus", 1234), dn2, replica2);
syncList.add(record1);
syncList.add(record2);
@@ -401,8 +402,7 @@ public void testRWRReplicas() throws IOException {
private Collection<RecoveringBlock> initRecoveringBlocks() throws IOException {
Collection<RecoveringBlock> blocks = new ArrayList<RecoveringBlock>(1);
- DatanodeInfo mockOtherDN = new DatanodeInfo(
- new DatanodeID("127.0.0.1", "localhost", "storage-1234", 0, 0, 0));
+ DatanodeInfo mockOtherDN = DFSTestUtil.getLocalDatanodeInfo();
DatanodeInfo[] locs = new DatanodeInfo[] {
new DatanodeInfo(dn.getDNRegistrationForBP(block.getBlockPoolId())),
mockOtherDN };
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
index b7195a34326..a5c85510743 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestInterDatanodeProtocol.java
@@ -356,8 +356,7 @@ public void testInterDNProtocolTimeout() throws Throwable {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
- DatanodeID fakeDnId = new DatanodeID(
- "localhost", "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+ DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
DatanodeInfo dInfo = new DatanodeInfo(fakeDnId);
InterDatanodeProtocol proxy = null;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
index 7f4872198bc..d1136bc21d5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
@@ -29,6 +29,7 @@
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
+import java.util.Random;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
@@ -1155,4 +1156,75 @@ public boolean accept(File dir, String name) {
"No non-corrupt logs for txid " + startGapTxId, ioe);
}
}
+
+ /**
+ * Test that we can read from a byte stream without crashing.
+ *
+ */
+ static void validateNoCrash(byte garbage[]) throws IOException {
+ final String TEST_LOG_NAME = "test_edit_log";
+
+ EditLogFileOutputStream elfos = null;
+ File file = null;
+ EditLogFileInputStream elfis = null;
+ try {
+ file = new File(TEST_LOG_NAME);
+ elfos = new EditLogFileOutputStream(file, 0);
+ elfos.create();
+ elfos.writeRaw(garbage, 0, garbage.length);
+ elfos.setReadyToFlush();
+ elfos.flushAndSync();
+ elfos.close();
+ elfos = null;
+ file = new File(TEST_LOG_NAME);
+ elfis = new EditLogFileInputStream(file);
+
+ // verify that we can read everything without killing the JVM or
+ // throwing an exception other than IOException
+ try {
+ while (true) {
+ FSEditLogOp op = elfis.readOp();
+ if (op == null)
+ break;
+ }
+ } catch (IOException e) {
+ } catch (Throwable t) {
+ StringWriter sw = new StringWriter();
+ t.printStackTrace(new PrintWriter(sw));
+ fail("caught non-IOException throwable with message " +
+ t.getMessage() + "\nstack trace\n" + sw.toString());
+ }
+ } finally {
+ if ((elfos != null) && (elfos.isOpen()))
+ elfos.close();
+ if (elfis != null)
+ elfis.close();
+ }
+ }
+
+ static byte[][] invalidSequences = null;
+
+ /**
+ * "Fuzz" test for the edit log.
+ *
+ * This tests that we can read random garbage from the edit log without
+ * crashing the JVM or throwing an unchecked exception.
+ */
+ @Test
+ public void testFuzzSequences() throws IOException {
+ final int MAX_GARBAGE_LENGTH = 512;
+ final int MAX_INVALID_SEQ = 5000;
+ // The seed to use for our random number generator. When given the same
+ // seed, java.util.Random will always produce the same sequence of values.
+ // This is important because it means that the test is deterministic and
+ // repeatable on any machine.
+ final int RANDOM_SEED = 123;
+
+ Random r = new Random(RANDOM_SEED);
+ for (int i = 0; i < MAX_INVALID_SEQ; i++) {
+ byte[] garbage = new byte[r.nextInt(MAX_GARBAGE_LENGTH)];
+ r.nextBytes(garbage);
+ validateNoCrash(garbage);
+ }
+ }
}
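
The fuzz test above relies on java.util.Random being a deterministic pseudo-random generator: the same seed always produces the same byte sequences, so any failure can be reproduced on another machine. A minimal, JDK-only illustration of that property (not part of the patch):

    import java.util.Arrays;
    import java.util.Random;

    public class SeedDeterminismDemo {
      public static void main(String[] args) {
        byte[] first = new byte[512];
        byte[] second = new byte[512];
        new Random(123).nextBytes(first);   // same seed as the test above
        new Random(123).nextBytes(second);
        // Prints true: identical seeds produce identical "garbage" input,
        // which is what makes the fuzz test repeatable.
        System.out.println(Arrays.equals(first, second));
      }
    }
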
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
index 5e657ded489..2a144b88c9c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDNFencing.java
@@ -46,9 +46,9 @@
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
@@ -585,7 +585,7 @@ public RandomDeleterPolicy() {
}
@Override
- public DatanodeDescriptor chooseReplicaToDelete(FSInodeInfo inode,
+ public DatanodeDescriptor chooseReplicaToDelete(BlockCollection inode,
Block block, short replicationFactor,
Collection first,
Collection second) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java
new file mode 100644
index 00000000000..bf1ca52b79d
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStateTransitionFailure.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.ha;
+
+import static org.apache.hadoop.test.GenericTestUtils.assertExceptionContains;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.times;
+import static org.mockito.Mockito.verify;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.ha.ServiceFailedException;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSNNTopology;
+import org.junit.Test;
+
+/**
+ * Tests to verify the behavior of failing to fully transition to an HA state.
+ */
+public class TestStateTransitionFailure {
+
+ public static final Log LOG = LogFactory.getLog(TestStateTransitionFailure.class);
+
+ /**
+ * Ensure that a failure to fully transition to the active state causes a
+ * shutdown of the NameNode.
+ */
+ @Test
+ public void testFailureToTransitionCausesShutdown() throws IOException {
+ MiniDFSCluster cluster = null;
+ try {
+ Configuration conf = new Configuration();
+ // Set an illegal value for the trash emptier interval. This will cause
+ // the NN to fail to transition to the active state.
+ conf.setLong(CommonConfigurationKeys.FS_TRASH_INTERVAL_KEY, -1);
+ cluster = new MiniDFSCluster.Builder(conf)
+ .nnTopology(MiniDFSNNTopology.simpleHATopology())
+ .numDataNodes(0)
+ .build();
+ cluster.waitActive();
+ Runtime mockRuntime = mock(Runtime.class);
+ cluster.getNameNode(0).setRuntimeForTesting(mockRuntime);
+ verify(mockRuntime, times(0)).exit(anyInt());
+ try {
+ cluster.transitionToActive(0);
+ fail("Transitioned to active but should not have been able to.");
+ } catch (ServiceFailedException sfe) {
+ assertExceptionContains("Error encountered requiring NN shutdown. " +
+ "Shutting down immediately.", sfe);
+ LOG.info("got expected exception", sfe);
+ }
+ verify(mockRuntime, times(1)).exit(anyInt());
+ } finally {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
index b412fe1ab1c..a18af908fc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
@@ -18,52 +18,60 @@
package org.apache.hadoop.net;
-
import java.util.HashMap;
import java.util.Map;
-import junit.framework.TestCase;
-
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-public class TestNetworkTopology extends TestCase {
+import org.junit.Test;
+import org.junit.Before;
+
+import static org.junit.Assert.*;
+
+public class TestNetworkTopology {
private final static NetworkTopology cluster = new NetworkTopology();
- private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3"),
- new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r3")
- };
- private final static DatanodeDescriptor NODE =
- new DatanodeDescriptor(new DatanodeID("h8", 5020), "/d2/r4");
+ private DatanodeDescriptor dataNodes[];
- static {
- for(int i=0; i pickNodesAtRandom(int numNodes,
/**
* This test checks that chooseRandom works for an excluded node.
*/
+ @Test
public void testChooseRandomExcludedNode() {
String scope = "~" + NodeBase.getPath(dataNodes[0]);
Map<Node, Integer> frequency = pickNodesAtRandom(100, scope);
@@ -186,6 +199,7 @@ public void testChooseRandomExcludedNode() {
/**
* This test checks that chooseRandom works for an excluded rack.
*/
+ @Test
public void testChooseRandomExcludedRack() {
Map<Node, Integer> frequency = pickNodesAtRandom(100, "~" + "/d2");
// all the nodes on the second rack should be zero
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 5d8a5404e78..ff4664675d4 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -163,6 +163,9 @@ Release 2.0.0 - UNRELEASED
MAPREDUCE-4219. make default container-executor.conf.dir be a path
relative to the container-executor binary. (rvs via tucu)
+ MAPREDUCE-4205. retrofit all JVM shutdown hooks to use ShutdownHookManager
+ (tucu)
+
OPTIMIZATIONS
BUG FIXES
@@ -278,6 +281,15 @@ Release 2.0.0 - UNRELEASED
MAPREDUCE-3173. MRV2 UI doesn't work properly without internet (Devaraj K
via bobby)
+ MAPREDUCE-3958. RM: Remove RMNodeState and replace it with NodeState
+ (Bikas Saha via bobby)
+
+ MAPREDUCE-4231. Update RAID to use the new BlockCollection interface.
+ (szetszwo)
+
+ MAPREDUCE-4148. MapReduce should not have a compile-time dependency on
+ HDFS. (tomwhite)
+
Release 0.23.3 - UNRELEASED
INCOMPATIBLE CHANGES
@@ -315,8 +327,13 @@ Release 0.23.3 - UNRELEASED
MAPREDUCE-4210. Expose listener address for WebApp (Daryn Sharp via bobby)
+ MAPREDUCE-4162. Correctly set token service (Daryn Sharp via bobby)
+
OPTIMIZATIONS
+ MAPREDUCE-3850. Avoid redundant calls for tokens in TokenCache (Daryn
+ Sharp via bobby)
+
BUG FIXES
MAPREDUCE-4092. commitJob Exception does not fail job (Jon Eagles via
@@ -447,6 +464,25 @@ Release 0.23.3 - UNRELEASED
MAPREDUCE-4211. Error conditions (missing appid, appid not found) are
masked in the RM app page (Jonathan Eagles via bobby)
+ MAPREDUCE-4163. consistently set the bind address (Daryn Sharp via bobby)
+
+ MAPREDUCE-4048. NullPointerException exception while accessing the
+ Application Master UI (Devaraj K via bobby)
+
+ MAPREDUCE-4220. RM apps page starttime/endtime sorts are incorrect
+ (Jonathan Eagles via bobby)
+
+ MAPREDUCE-4226. ConcurrentModificationException in FileSystemCounterGroup.
+ (tomwhite)
+
+ MAPREDUCE-4215. RM app page shows 500 error on appid parse error
+ (Jonathon Eagles via tgraves)
+
+ MAPREDUCE-4237. TestNodeStatusUpdater can fail if localhost has a domain
+ associated with it (bobby)
+
+ MAPREDUCE-4233. NPE can happen in RMNMNodeInfo. (bobby)
+
Release 0.23.2 - UNRELEASED
INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
index 4cd6eb1ec8d..ed2eef0eb9d 100644
--- a/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
+++ b/hadoop-mapreduce-project/bin/mr-jobhistory-daemon.sh
@@ -94,6 +94,7 @@ export HADOOP_ROOT_LOGGER=${HADOOP_ROOT_LOGGER:-INFO,RFA}
export HADOOP_JHS_LOGGER=${HADOOP_JHS_LOGGER:-INFO,JSA}
log=$YARN_LOG_DIR/yarn-$YARN_IDENT_STRING-$command-$HOSTNAME.out
pid=$YARN_PID_DIR/yarn-$YARN_IDENT_STRING-$command.pid
+YARN_STOP_TIMEOUT=${YARN_STOP_TIMEOUT:-5}
# Set default scheduling priority
if [ "$YARN_NICENESS" = "" ]; then
@@ -129,9 +130,15 @@ case $startStop in
(stop)
if [ -f $pid ]; then
- if kill -0 `cat $pid` > /dev/null 2>&1; then
+ TARGET_PID=`cat $pid`
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
echo stopping $command
- kill `cat $pid`
+ kill $TARGET_PID
+ sleep $YARN_STOP_TIMEOUT
+ if kill -0 $TARGET_PID > /dev/null 2>&1; then
+ echo "$command did not stop gracefully after $YARN_STOP_TIMEOUT seconds: killing with kill -9"
+ kill -9 $TARGET_PID
+ fi
else
echo no $command to stop
fi
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
index 164f406017e..fdcec65a90a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.mapred;
import java.io.IOException;
-import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.Collections;
@@ -127,10 +126,7 @@ protected void startRpcServer() {
}
server.start();
- InetSocketAddress listenerAddress = server.getListenerAddress();
- listenerAddress.getAddress();
- this.address = NetUtils.createSocketAddr(InetAddress.getLocalHost()
- .getCanonicalHostName() + ":" + listenerAddress.getPort());
+ this.address = NetUtils.getConnectAddress(server);
} catch (IOException e) {
throw new YarnException(e);
}
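
This hunk, like the MRClientService and TestContainerLauncher changes below, replaces manual InetAddress.getLocalHost() lookups with NetUtils.getConnectAddress(server). Conceptually the helper turns the address a server actually bound to into one a client can connect to, substituting a concrete local address when the bind address was the wildcard. A rough JDK-only sketch of that idea, under that assumption and not Hadoop's actual NetUtils code:

    import java.net.InetAddress;
    import java.net.InetSocketAddress;
    import java.net.UnknownHostException;

    public final class ConnectAddressSketch {
      // If the server bound to 0.0.0.0, report the local hostname instead so
      // the address is actually connectable; otherwise return it unchanged.
      static InetSocketAddress toConnectAddress(InetSocketAddress bound)
          throws UnknownHostException {
        InetAddress addr = bound.getAddress();
        if (addr != null && addr.isAnyLocalAddress()) {
          return new InetSocketAddress(InetAddress.getLocalHost(),
              bound.getPort());
        }
        return bound;
      }

      public static void main(String[] args) throws UnknownHostException {
        System.out.println(toConnectAddress(new InetSocketAddress(8080)));
      }
    }
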
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
index 01b29eaf178..2e8defbb549 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java
@@ -50,7 +50,9 @@
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.metrics2.source.JvmMetrics;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -77,7 +79,8 @@ public static void main(String[] args) throws Throwable {
String host = args[0];
int port = Integer.parseInt(args[1]);
- final InetSocketAddress address = new InetSocketAddress(host, port);
+ final InetSocketAddress address =
+ NetUtils.createSocketAddrForHost(host, port);
final TaskAttemptID firstTaskid = TaskAttemptID.forName(args[2]);
int jvmIdInt = Integer.parseInt(args[3]);
JVMId jvmId = new JVMId(firstTaskid.getJobID(),
@@ -214,8 +217,7 @@ private static Token<JobTokenIdentifier> loadCredentials(JobConf conf,
LOG.debug("loading token. # keys =" +credentials.numberOfSecretKeys() +
"; from file=" + jobTokenFile);
Token<JobTokenIdentifier> jt = TokenCache.getJobToken(credentials);
- jt.setService(new Text(address.getAddress().getHostAddress() + ":"
- + address.getPort()));
+ SecurityUtil.setTokenService(jt, address);
UserGroupInformation current = UserGroupInformation.getCurrentUser();
current.addToken(jt);
for (Token<? extends TokenIdentifier> tok : credentials.getAllTokens()) {
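
Here, and in Cluster.getDelegationToken further down, hand-rolled "ip:port" token service strings are replaced by SecurityUtil.setTokenService(token, address). The removed lines show the manual construction; the snippet below merely restates that derivation in plain JDK form and is not Hadoop's SecurityUtil:

    import java.net.InetSocketAddress;

    public class TokenServiceSketch {
      // Rough idea behind the token service string (hedged sketch): derive a
      // canonical "ip:port" name from the resolved socket address.
      static String buildTokenService(InetSocketAddress addr) {
        return addr.getAddress().getHostAddress() + ":" + addr.getPort();
      }

      public static void main(String[] args) {
        System.out.println(buildTokenService(
            new InetSocketAddress("127.0.0.1", 8020)));  // 127.0.0.1:8020
      }
    }
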
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index f76ae5a9db3..2d6f3121485 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -90,6 +90,7 @@
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.ClusterInfo;
import org.apache.hadoop.yarn.SystemClock;
@@ -130,6 +131,11 @@ public class MRAppMaster extends CompositeService {
private static final Log LOG = LogFactory.getLog(MRAppMaster.class);
+ /**
+ * Priority of the MRAppMaster shutdown hook.
+ */
+ public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+
private Clock clock;
private final long startTime;
private final long appSubmitTime;
@@ -990,8 +996,8 @@ public static void main(String[] args) {
new MRAppMaster(applicationAttemptId, containerId, nodeHostString,
Integer.parseInt(nodePortString),
Integer.parseInt(nodeHttpPortString), appSubmitTime);
- Runtime.getRuntime().addShutdownHook(
- new MRAppMasterShutdownHook(appMaster));
+ ShutdownHookManager.get().addShutdownHook(
+ new MRAppMasterShutdownHook(appMaster), SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration(new JobConf());
conf.addResource(new Path(MRJobConfig.JOB_CONF_FILE));
String jobUserName = System
@@ -1010,7 +1016,7 @@ public static void main(String[] args) {
// The shutdown hook that runs when a signal is received AND during normal
// close of the JVM.
- static class MRAppMasterShutdownHook extends Thread {
+ static class MRAppMasterShutdownHook implements Runnable {
MRAppMaster appMaster;
MRAppMasterShutdownHook(MRAppMaster appMaster) {
this.appMaster = appMaster;
@@ -1028,12 +1034,6 @@ public void run() {
appMaster.jobHistoryEventHandler.setSignalled(true);
}
appMaster.stop();
- try {
- //Close all the FileSystem objects
- FileSystem.closeAll();
- } catch (IOException ioe) {
- LOG.warn("Failed to close all FileSystem objects", ioe);
- }
}
}
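
MRAppMaster now registers its shutdown hook through ShutdownHookManager with an explicit SHUTDOWN_HOOK_PRIORITY instead of adding a raw JVM hook, and the explicit FileSystem.closeAll() call is dropped, presumably because FileSystem cleanup is itself registered with the manager at a lower priority so the ordering is handled in one place. A toy, JDK-only model of priority-ordered shutdown hooks (not the Hadoop ShutdownHookManager implementation):

    import java.util.Comparator;
    import java.util.concurrent.PriorityBlockingQueue;

    // Toy model: one real JVM shutdown hook drains registered tasks in
    // descending priority order, so higher-priority hooks run first.
    public class PriorityHookSketch {
      private static final PriorityBlockingQueue<Entry> HOOKS =
          new PriorityBlockingQueue<>(11,
              Comparator.comparingInt((Entry e) -> e.priority).reversed());

      private static final class Entry {
        final int priority;
        final Runnable task;
        Entry(int priority, Runnable task) {
          this.priority = priority;
          this.task = task;
        }
      }

      static {
        Runtime.getRuntime().addShutdownHook(new Thread(() -> {
          for (Entry e; (e = HOOKS.poll()) != null; ) {
            e.task.run();
          }
        }));
      }

      static void addShutdownHook(Runnable task, int priority) {
        HOOKS.add(new Entry(priority, task));
      }

      public static void main(String[] args) {
        addShutdownHook(() -> System.out.println("filesystem cleanup (last)"), 10);
        addShutdownHook(() -> System.out.println("app master stop (first)"), 30);
      }
    }
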
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
index 60b29e831df..341e7215293 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
@@ -18,9 +18,7 @@
package org.apache.hadoop.mapreduce.v2.app.client;
-import java.net.InetAddress;
import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
import java.util.Arrays;
import java.util.Collection;
@@ -78,7 +76,6 @@
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -116,13 +113,7 @@ public MRClientService(AppContext appContext) {
public void start() {
Configuration conf = getConfig();
YarnRPC rpc = YarnRPC.create(conf);
- InetSocketAddress address = NetUtils.createSocketAddr("0.0.0.0:0");
- InetAddress hostNameResolved = null;
- try {
- hostNameResolved = InetAddress.getLocalHost();
- } catch (UnknownHostException e) {
- throw new YarnException(e);
- }
+ InetSocketAddress address = new InetSocketAddress(0);
ClientToAMSecretManager secretManager = null;
if (UserGroupInformation.isSecurityEnabled()) {
@@ -150,9 +141,7 @@ public void start() {
}
server.start();
- this.bindAddress =
- NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
- + ":" + server.getPort());
+ this.bindAddress = NetUtils.getConnectAddress(server);
LOG.info("Instantiated MRClientService at " + this.bindAddress);
try {
webApp = WebApps.$for("mapreduce", AppContext.class, appContext, "ws").with(conf).
@@ -191,6 +180,11 @@ class MRClientProtocolHandler implements MRClientProtocol {
private RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
+ @Override
+ public InetSocketAddress getConnectAddress() {
+ return getBindAddress();
+ }
+
private Job verifyAndGetJob(JobId jobID,
boolean modifyAccess) throws YarnRemoteException {
Job job = appContext.getJob(jobID);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
index 46a6111d610..44dd16daa05 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.mapreduce.v2.app.launcher;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
import java.security.PrivilegedAction;
import java.util.HashSet;
@@ -34,7 +35,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.ShuffleHandler;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
@@ -58,6 +58,7 @@
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.service.AbstractService;
+import org.apache.hadoop.yarn.util.ProtoUtils;
import org.apache.hadoop.yarn.util.Records;
import com.google.common.util.concurrent.ThreadFactoryBuilder;
@@ -321,13 +322,13 @@ protected ContainerManager getCMProxy(ContainerId containerID,
final String containerManagerBindAddr, ContainerToken containerToken)
throws IOException {
+ final InetSocketAddress cmAddr =
+ NetUtils.createSocketAddr(containerManagerBindAddr);
UserGroupInformation user = UserGroupInformation.getCurrentUser();
if (UserGroupInformation.isSecurityEnabled()) {
- Token<ContainerTokenIdentifier> token = new Token<ContainerTokenIdentifier>(
- containerToken.getIdentifier().array(), containerToken
- .getPassword().array(), new Text(containerToken.getKind()),
- new Text(containerToken.getService()));
+ Token<ContainerTokenIdentifier> token =
+ ProtoUtils.convertFromProtoFormat(containerToken, cmAddr);
// the user in createRemoteUser in this context has to be ContainerID
user = UserGroupInformation.createRemoteUser(containerID.toString());
user.addToken(token);
@@ -338,8 +339,7 @@ protected ContainerManager getCMProxy(ContainerId containerID,
@Override
public ContainerManager run() {
return (ContainerManager) rpc.getProxy(ContainerManager.class,
- NetUtils.createSocketAddr(containerManagerBindAddr),
- getConfig());
+ cmAddr, getConfig());
}
});
return proxy;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index 49df2176ef9..b0471e68ca0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -36,6 +36,7 @@
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -133,15 +134,14 @@ protected float getApplicationProgress() {
protected void register() {
//Register
- String host = clientService.getBindAddress().getAddress()
- .getCanonicalHostName();
+ InetSocketAddress serviceAddr = clientService.getBindAddress();
try {
RegisterApplicationMasterRequest request =
recordFactory.newRecordInstance(RegisterApplicationMasterRequest.class);
request.setApplicationAttemptId(applicationAttemptId);
- request.setHost(host);
- request.setRpcPort(clientService.getBindAddress().getPort());
- request.setTrackingUrl(host + ":" + clientService.getHttpPort());
+ request.setHost(serviceAddr.getHostName());
+ request.setRpcPort(serviceAddr.getPort());
+ request.setTrackingUrl(serviceAddr.getHostName() + ":" + clientService.getHttpPort());
RegisterApplicationMasterResponse response =
scheduler.registerApplicationMaster(request);
minContainerCapability = response.getMinimumResourceCapability();
@@ -262,9 +262,6 @@ protected AMRMProtocol createSchedulerProxy() {
if (UserGroupInformation.isSecurityEnabled()) {
String tokenURLEncodedStr = System.getenv().get(
ApplicationConstants.APPLICATION_MASTER_TOKEN_ENV_NAME);
- if (LOG.isDebugEnabled()) {
- LOG.debug("AppMasterToken is " + tokenURLEncodedStr);
- }
Token<? extends TokenIdentifier> token = new Token();
try {
@@ -273,6 +270,10 @@ protected AMRMProtocol createSchedulerProxy() {
throw new YarnException(e);
}
+ SecurityUtil.setTokenService(token, serviceAddr);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("AppMasterToken is " + token);
+ }
currentUser.addToken(token);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index f9583da5a97..da537e5bc71 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -27,6 +27,8 @@
import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -47,6 +49,8 @@
* This class renders the various pages that the web app supports.
*/
public class AppController extends Controller implements AMParams {
+ private static final Log LOG = LogFactory.getLog(AppController.class);
+
protected final App app;
protected AppController(App app, Configuration conf, RequestContext ctx,
@@ -220,6 +224,8 @@ public void tasks() {
toString().toLowerCase(Locale.US));
setTitle(join(tt, " Tasks for ", $(JOB_ID)));
} catch (Exception e) {
+ LOG.error("Failed to render tasks page with task type : "
+ + $(TASK_TYPE) + " for job id : " + $(JOB_ID), e);
badRequest(e.getMessage());
}
}
@@ -283,6 +289,8 @@ public void attempts() {
render(attemptsPage());
} catch (Exception e) {
+ LOG.error("Failed to render attempts page with task type : "
+ + $(TASK_TYPE) + " for job id : " + $(JOB_ID), e);
badRequest(e.getMessage());
}
}
@@ -316,7 +324,8 @@ public void conf() {
*/
void badRequest(String s) {
setStatus(HttpServletResponse.SC_BAD_REQUEST);
- setTitle(join("Bad request: ", s));
+ String title = "Bad request: ";
+ setTitle((s != null) ? join(title, s) : title);
}
/**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
index cde1333ec8f..9ae938a8081 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncher.java
@@ -356,7 +356,7 @@ protected ContainerManager getCMProxy(ContainerId containerID,
// make proxy connect to our local containerManager server
ContainerManager proxy = (ContainerManager) rpc.getProxy(
ContainerManager.class,
- NetUtils.createSocketAddr("localhost:" + server.getPort()), conf);
+ NetUtils.getConnectAddress(server), conf);
return proxy;
}
};
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
new file mode 100644
index 00000000000..4fcb4755736
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/webapp/TestAppController.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Matchers.eq;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.util.Records;
+import org.apache.hadoop.yarn.webapp.Controller.RequestContext;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestAppController {
+
+ private AppController appController;
+ private RequestContext ctx;
+
+ @Before
+ public void setUp() {
+ AppContext context = mock(AppContext.class);
+ when(context.getApplicationID()).thenReturn(
+ Records.newRecord(ApplicationId.class));
+ App app = new App(context);
+ Configuration conf = new Configuration();
+ ctx = mock(RequestContext.class);
+ appController = new AppController(app, conf, ctx);
+ }
+
+ @Test
+ public void testBadRequest() {
+ String message = "test string";
+ appController.badRequest(message);
+ verifyExpectations(message);
+ }
+
+ @Test
+ public void testBadRequestWithNullMessage() {
+ // It should not throw NullPointerException
+ appController.badRequest(null);
+ verifyExpectations(StringUtils.EMPTY);
+ }
+
+ private void verifyExpectations(String message) {
+ verify(ctx).setStatus(400);
+ verify(ctx).set("app.id", "application_0_0000");
+ verify(ctx).set(eq("rm.web"), anyString());
+ verify(ctx).set("title", "Bad request: " + message);
+ }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
index bc590b606a3..08166b96b18 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/MRClientProtocol.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.mapreduce.v2.api;
+import java.net.InetSocketAddress;
+
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptResponse;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
@@ -45,6 +47,11 @@
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
public interface MRClientProtocol {
+ /**
+ * Address to which the client is connected
+ * @return InetSocketAddress
+ */
+ public InetSocketAddress getConnectAddress();
public GetJobReportResponse getJobReport(GetJobReportRequest request) throws YarnRemoteException;
public GetTaskReportResponse getTaskReport(GetTaskReportRequest request) throws YarnRemoteException;
public GetTaskAttemptReportResponse getTaskAttemptReport(GetTaskAttemptReportRequest request) throws YarnRemoteException;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
index cf14532902c..3ab3f0c3b8b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java
@@ -104,6 +104,11 @@ public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr,
MRClientProtocolPB.class, clientVersion, addr, conf);
}
+ @Override
+ public InetSocketAddress getConnectAddress() {
+ return RPC.getServerAddress(proxy);
+ }
+
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
throws YarnRemoteException {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 00000000000..0975deab7e7
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1 @@
+org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
index 9401f4b585a..c76328d5056 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/TestRPCFactories.java
@@ -122,6 +122,11 @@ private void testPbClientFactory() {
public class MRClientProtocolTestImpl implements MRClientProtocol {
+ @Override
+ public InetSocketAddress getConnectAddress() {
+ return null;
+ }
+
@Override
public GetJobReportResponse getJobReport(GetJobReportRequest request)
throws YarnRemoteException {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index cfb8ce4bd7e..e60d745faa5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -37,6 +37,7 @@
<groupId>org.apache.hadoop</groupId>
<artifactId>hadoop-hdfs</artifactId>
+ <scope>test</scope>
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index eb838fe8a7a..e456a7afa88 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -35,13 +35,11 @@
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Master;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.util.ConfigUtil;
import org.apache.hadoop.mapreduce.v2.LogParams;
-import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@@ -388,21 +386,8 @@ public long getTaskTrackerExpiryInterval() throws IOException,
*/
public Token<DelegationTokenIdentifier>
getDelegationToken(Text renewer) throws IOException, InterruptedException{
- Token<DelegationTokenIdentifier> result =
- client.getDelegationToken(renewer);
-
- if (result == null) {
- return result;
- }
-
- InetSocketAddress addr = Master.getMasterAddress(conf);
- StringBuilder service = new StringBuilder();
- service.append(NetUtils.normalizeHostName(addr.getAddress().
- getHostAddress()));
- service.append(':');
- service.append(addr.getPort());
- result.setService(new Text(service.toString()));
- return result;
+ // client has already set the service
+ return client.getDelegationToken(renewer);
}
/**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 4038f65cd46..148df503243 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -38,7 +38,6 @@
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.QueueACL;
@@ -433,8 +432,7 @@ private void printTokens(JobID jobId,
LOG.debug("Printing tokens for job: " + jobId);
    for(Token<?> token: credentials.getAllTokens()) {
if (token.getKind().toString().equals("HDFS_DELEGATION_TOKEN")) {
- LOG.debug("Submitting with " +
- org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier.stringifyToken(token));
+ LOG.debug("Submitting with " + token);
}
}
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
index 7c23561b659..3f3729fb056 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/FileSystemCounterGroup.java
@@ -23,6 +23,7 @@
import java.io.IOException;
import java.util.Arrays;
import java.util.concurrent.ConcurrentMap;
+import java.util.concurrent.ConcurrentSkipListMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;
@@ -54,7 +55,8 @@ public abstract class FileSystemCounterGroup
// C[] would need Array.newInstance which requires a Class reference.
// Just a few local casts probably worth not having to carry it around.
-  private final Map<String, Object[]> map = Maps.newTreeMap();
+  private final Map<String, Object[]> map =
+    new ConcurrentSkipListMap<String, Object[]>();
private String displayName;
private static final Joiner NAME_JOINER = Joiner.on('_');
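Note on the change above: replacing Maps.newTreeMap() with a ConcurrentSkipListMap keeps the counter names sorted but makes iteration weakly consistent, so a caller can walk a filesystem counter group while new counters are being added (the behavior the new testFileSystemGroupIteratorConcurrency test below exercises). A minimal JDK-only sketch of that property, with illustrative names that are not taken from the patch:

import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ConcurrentSkipListMap;

public class SkipListIterationSketch {
  public static void main(String[] args) {
    // Sorted, thread-safe map; its iterators are weakly consistent, so an
    // insert during iteration never throws ConcurrentModificationException
    // (a TreeMap iterator would fail fast here).
    Map<String, Long> bytesRead = new ConcurrentSkipListMap<String, Long>();
    bytesRead.put("fs1", 1L);
    bytesRead.put("fs2", 1L);

    Iterator<Map.Entry<String, Long>> it = bytesRead.entrySet().iterator();
    bytesRead.put("fs3", 1L);   // update arrives mid-iteration
    while (it.hasNext()) {
      Map.Entry<String, Long> e = it.next();
      System.out.println(e.getKey() + "=" + e.getValue());
    }
  }
}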
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
index ef25939ebdb..1109f3f3825 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/TokenCache.java
@@ -19,7 +19,9 @@
package org.apache.hadoop.mapreduce.security;
import java.io.IOException;
+import java.util.HashSet;
import java.util.List;
+import java.util.Set;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -28,7 +30,6 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.Master;
@@ -92,8 +93,11 @@ public static void cleanUpTokenReferral(Configuration conf) {
static void obtainTokensForNamenodesInternal(Credentials credentials,
Path[] ps, Configuration conf) throws IOException {
+    Set<FileSystem> fsSet = new HashSet<FileSystem>();
for(Path p: ps) {
- FileSystem fs = FileSystem.get(p.toUri(), conf);
+ fsSet.add(p.getFileSystem(conf));
+ }
+ for (FileSystem fs : fsSet) {
obtainTokensForNamenodesInternal(fs, credentials, conf);
}
}
@@ -174,16 +178,14 @@ private static void mergeBinaryTokens(Credentials creds, Configuration conf) {
* @param namenode
* @return delegation token
*/
- @SuppressWarnings("unchecked")
@InterfaceAudience.Private
-  public static Token<DelegationTokenIdentifier> getDelegationToken(
+  public static Token<?> getDelegationToken(
Credentials credentials, String namenode) {
//No fs specific tokens issues by this fs. It may however issue tokens
// for other filesystems - which would be keyed by that filesystems name.
if (namenode == null)
return null;
-    return (Token<DelegationTokenIdentifier>) credentials.getToken(new Text(
-        namenode));
+    return (Token<?>) credentials.getToken(new Text(namenode));
}
/**
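Note on the TokenCache change above: obtainTokensForNamenodesInternal now collects each path's FileSystem into a Set before requesting delegation tokens, so a job with many input paths on the same filesystem asks that filesystem only once (the new testSingleTokenFetch below passes the same mock path twice and verifies a single getDelegationTokens call). A small JDK-only sketch of the dedup pattern; TokenSource is a hypothetical stand-in for FileSystem, not part of the patch:

import java.util.HashSet;
import java.util.Set;

public class TokenFetchDedupSketch {
  /** Hypothetical stand-in for FileSystem. */
  interface TokenSource {
    void obtainDelegationTokens(String renewer);
  }

  static void obtainAll(TokenSource[] sourcePerPath, String renewer) {
    // Deduplicate first: many paths typically resolve to the same filesystem.
    Set<TokenSource> distinct = new HashSet<TokenSource>();
    for (TokenSource s : sourcePerPath) {
      distinct.add(s);
    }
    // Then make exactly one token request per distinct filesystem.
    for (TokenSource s : distinct) {
      s.obtainDelegationTokens(renewer);
    }
  }

  public static void main(String[] args) {
    TokenSource hdfs = new TokenSource() {
      public void obtainDelegationTokens(String renewer) {
        System.out.println("token requested by " + renewer);
      }
    };
    // Two "paths" that resolve to the same filesystem -> one request.
    obtainAll(new TokenSource[] { hdfs, hdfs }, "jhs/host@REALM");
  }
}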
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
index e4675b523a5..90007770691 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
@@ -39,7 +39,6 @@
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.util.StringUtils;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
new file mode 100644
index 00000000000..f797a6aa6ff
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier
@@ -0,0 +1,2 @@
+org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier
+org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier
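Note on the new META-INF/services entry above: listing the MapReduce DelegationTokenIdentifier and JobTokenIdentifier under org.apache.hadoop.security.token.TokenIdentifier registers them through the standard java.util.ServiceLoader mechanism, so token-handling code can discover identifier implementations from the classpath rather than hard-coding HDFS-specific classes. A generic ServiceLoader sketch; the Identifier interface is illustrative, Hadoop's real service type is TokenIdentifier:

import java.util.ServiceLoader;

public class IdentifierLoaderSketch {
  /** Illustrative service interface standing in for TokenIdentifier. */
  public interface Identifier {
    String getKind();
  }

  public static void main(String[] args) {
    // ServiceLoader reads META-INF/services/<interface FQN> from every jar
    // on the classpath and instantiates each listed implementation.
    for (Identifier id : ServiceLoader.load(Identifier.class)) {
      System.out.println("registered identifier: " + id.getKind());
    }
  }
}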
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
index f494556bac3..74a4744e157 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.mapred;
import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
import java.io.IOException;
import java.text.ParseException;
@@ -203,6 +204,25 @@ public void testGroupIteratorConcurrency() {
counters.incrCounter("group1", "counter2", 1);
iterator.next();
}
+
+ @Test
+ public void testFileSystemGroupIteratorConcurrency() {
+ Counters counters = new Counters();
+ // create 2 filesystem counter groups
+ counters.findCounter("fs1", FileSystemCounter.BYTES_READ).increment(1);
+ counters.findCounter("fs2", FileSystemCounter.BYTES_READ).increment(1);
+
+ // Iterate over the counters in this group while updating counters in
+ // the group
+ Group group = counters.getGroup(FileSystemCounter.class.getName());
+    Iterator<Counter> iterator = group.iterator();
+ counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
+ assertTrue(iterator.hasNext());
+ iterator.next();
+ counters.findCounter("fs3", FileSystemCounter.BYTES_READ).increment(1);
+ assertTrue(iterator.hasNext());
+ iterator.next();
+ }
public static void main(String[] args) throws IOException {
new TestCounters().testCounters();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
index b0e9350dbed..1ae2ecde1e2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/security/TestTokenCache.java
@@ -251,6 +251,26 @@ public Token<?> answer(InvocationOnMock invocation) throws Throwable {
return mockFs;
}
+ @Test
+ public void testSingleTokenFetch() throws Exception {
+ Configuration conf = new Configuration();
+ conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM");
+ String renewer = Master.getMasterPrincipal(conf);
+ Credentials credentials = new Credentials();
+
+ FileSystem mockFs = mock(FileSystem.class);
+ when(mockFs.getCanonicalServiceName()).thenReturn("host:0");
+ when(mockFs.getUri()).thenReturn(new URI("mockfs://host:0"));
+
+ Path mockPath = mock(Path.class);
+ when(mockPath.getFileSystem(conf)).thenReturn(mockFs);
+
+ Path[] paths = new Path[]{ mockPath, mockPath };
+ when(mockFs.getDelegationTokens("me", credentials)).thenReturn(null);
+ TokenCache.obtainTokensForNamenodesInternal(credentials, paths, conf);
+ verify(mockFs, times(1)).getDelegationTokens(renewer, credentials);
+ }
+
@Test
public void testCleanUpTokenReferral() throws Exception {
Configuration conf = new Configuration();
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
index a4017631148..388356f01ab 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java
@@ -19,9 +19,7 @@
package org.apache.hadoop.mapreduce.v2.hs;
import java.io.IOException;
-import java.net.InetAddress;
import java.net.InetSocketAddress;
-import java.net.UnknownHostException;
import java.security.AccessControlException;
import java.security.PrivilegedExceptionAction;
import java.util.Arrays;
@@ -76,7 +74,6 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
@@ -117,17 +114,10 @@ public void start() {
Configuration conf = getConfig();
YarnRPC rpc = YarnRPC.create(conf);
initializeWebApp(conf);
- String serviceAddr = conf.get(JHAdminConfig.MR_HISTORY_ADDRESS,
- JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
- InetSocketAddress address = NetUtils.createSocketAddr(serviceAddr,
- JHAdminConfig.DEFAULT_MR_HISTORY_PORT,
- JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS);
- InetAddress hostNameResolved = null;
- try {
- hostNameResolved = InetAddress.getLocalHost();
- } catch (UnknownHostException e) {
- throw new YarnException(e);
- }
+ InetSocketAddress address = conf.getSocketAddr(
+ JHAdminConfig.MR_HISTORY_ADDRESS,
+ JHAdminConfig.DEFAULT_MR_HISTORY_ADDRESS,
+ JHAdminConfig.DEFAULT_MR_HISTORY_PORT);
server =
rpc.getServer(HSClientProtocol.class, protocolHandler, address,
@@ -143,31 +133,24 @@ public void start() {
}
server.start();
- this.bindAddress =
- NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
- + ":" + server.getPort());
+ this.bindAddress = conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_ADDRESS,
+ server.getListenerAddress());
LOG.info("Instantiated MRClientService at " + this.bindAddress);
- if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) {
- String resolvedAddress = bindAddress.getHostName() + ":" + bindAddress.getPort();
- conf.set(JHAdminConfig.MR_HISTORY_ADDRESS, resolvedAddress);
-
- String hostname = getConfig().get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
- JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
- hostname = (hostname.contains(":")) ? hostname.substring(0, hostname.indexOf(":")) : hostname;
- int port = webApp.port();
- resolvedAddress = hostname + ":" + port;
- conf.set(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, resolvedAddress);
- }
-
super.start();
}
private void initializeWebApp(Configuration conf) {
webApp = new HsWebApp(history);
- String bindAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
- JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
- WebApps.$for("jobhistory", HistoryClientService.class, this, "ws").with(conf).at(bindAddress).start(webApp);
+ InetSocketAddress bindAddress = conf.getSocketAddr(
+ JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
+ JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS,
+ JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT);
+ // NOTE: there should be a .at(InetSocketAddress)
+ WebApps.$for("jobhistory", HistoryClientService.class, this, "ws")
+ .with(conf).at(NetUtils.getHostPortString(bindAddress)).start(webApp);
+ conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
+ webApp.getListenerAddress());
}
@Override
@@ -195,6 +178,10 @@ private class HSClientProtocolHandler implements HSClientProtocol {
private RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
+ public InetSocketAddress getConnectAddress() {
+ return getBindAddress();
+ }
+
private Job verifyAndGetJob(final JobId jobID) throws YarnRemoteException {
UserGroupInformation loginUgi = null;
Job job = null;
@@ -352,8 +339,7 @@ public GetDelegationTokenResponse getDelegationToken(
jhsDTSecretManager);
DelegationToken mrDToken = BuilderUtils.newDelegationToken(
realJHSToken.getIdentifier(), realJHSToken.getKind().toString(),
- realJHSToken.getPassword(), bindAddress.getAddress().getHostAddress()
- + ":" + bindAddress.getPort());
+ realJHSToken.getPassword(), realJHSToken.getService().toString());
response.setDelegationToken(mrDToken);
return response;
} catch (IOException i) {
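Note on the HistoryClientService change above: the hand-rolled InetAddress.getLocalHost() / createSocketAddr logic is replaced by the Configuration getSocketAddr / updateConnectAddr pair, i.e. read the configured host:port, bind the server, then write the actual listener address back so clients and token services agree on it (which also removes the MiniYARNCluster special case). A JDK-only sketch of that read-bind-advertise flow; parseHostPort and the port value are assumptions for illustration, not Hadoop APIs:

import java.net.InetSocketAddress;

public class BindAdvertiseSketch {
  // Assumed helper that mirrors what reading a "host:port" setting does.
  static InetSocketAddress parseHostPort(String hostPort, int defaultPort) {
    int colon = hostPort.lastIndexOf(':');
    String host = colon < 0 ? hostPort : hostPort.substring(0, colon);
    int port = colon < 0 ? defaultPort
        : Integer.parseInt(hostPort.substring(colon + 1));
    return new InetSocketAddress(host, port);
  }

  public static void main(String[] args) {
    InetSocketAddress configured = parseHostPort("0.0.0.0:0", 10020);

    // Pretend the RPC server bound and reported an ephemeral port.
    int boundPort = 34567;
    InetSocketAddress advertised =
        new InetSocketAddress(configured.getHostName(), boundPort);

    // This is the address that would be written back into the config,
    // the role conf.updateConnectAddr plays in the patch above.
    System.out.println("advertise " + advertised.getHostName()
        + ":" + advertised.getPort());
  }
}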
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
index 3c7bd8ccd81..00b3c70deb3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistoryServer.java
@@ -28,6 +28,7 @@
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.util.ShutdownHookManager;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -40,6 +41,12 @@
*
*****************************************************************/
public class JobHistoryServer extends CompositeService {
+
+ /**
+ * Priority of the JobHistoryServer shutdown hook.
+ */
+ public static final int SHUTDOWN_HOOK_PRIORITY = 30;
+
private static final Log LOG = LogFactory.getLog(JobHistoryServer.class);
private HistoryContext historyContext;
private HistoryClientService clientService;
@@ -118,8 +125,9 @@ public static void main(String[] args) {
StringUtils.startupShutdownMessage(JobHistoryServer.class, args, LOG);
try {
JobHistoryServer jobHistoryServer = new JobHistoryServer();
- Runtime.getRuntime().addShutdownHook(
- new CompositeServiceShutdownHook(jobHistoryServer));
+ ShutdownHookManager.get().addShutdownHook(
+ new CompositeServiceShutdownHook(jobHistoryServer),
+ SHUTDOWN_HOOK_PRIORITY);
YarnConfiguration conf = new YarnConfiguration(new JobConf());
jobHistoryServer.init(conf);
jobHistoryServer.start();
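Note on the JobHistoryServer change above: registering the shutdown hook through ShutdownHookManager with SHUTDOWN_HOOK_PRIORITY = 30 gives it a defined order relative to other hooks (higher priorities run first), so the server can stop before lower-priority cleanup such as the FileSystem cache close. A JDK-only sketch of priority-ordered shutdown with illustrative hook names and priorities:

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

public class PriorityShutdownSketch {
  static class Hook {
    final String name;
    final int priority;
    Hook(String name, int priority) { this.name = name; this.priority = priority; }
  }

  public static void main(String[] args) {
    // Illustrative priorities: the server hook outranks the cache hook.
    List<Hook> hooks = new ArrayList<Hook>();
    hooks.add(new Hook("filesystem-cache-close", 10));
    hooks.add(new Hook("jobhistoryserver-stop", 30));

    // Run highest priority first, the ordering contract a single managed
    // JVM shutdown hook can provide over its registered hooks.
    Collections.sort(hooks, new Comparator<Hook>() {
      public int compare(Hook a, Hook b) { return b.priority - a.priority; }
    });
    for (Hook h : hooks) {
      System.out.println("running shutdown hook: " + h.name);
    }
  }
}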
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
index c2a373750cc..0143cb73913 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
@@ -32,7 +32,6 @@
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.JobStatus;
import org.apache.hadoop.mapreduce.MRJobConfig;
@@ -63,6 +62,7 @@
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.YarnException;
@@ -144,7 +144,7 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
if (application != null) {
trackingUrl = application.getTrackingUrl();
}
- String serviceAddr = null;
+ InetSocketAddress serviceAddr = null;
while (application == null
|| YarnApplicationState.RUNNING == application
.getYarnApplicationState()) {
@@ -172,25 +172,23 @@ private MRClientProtocol getProxy() throws YarnRemoteException {
if(!conf.getBoolean(MRJobConfig.JOB_AM_ACCESS_DISABLED, false)) {
UserGroupInformation newUgi = UserGroupInformation.createRemoteUser(
UserGroupInformation.getCurrentUser().getUserName());
- serviceAddr = application.getHost() + ":" + application.getRpcPort();
+ serviceAddr = NetUtils.createSocketAddrForHost(
+ application.getHost(), application.getRpcPort());
if (UserGroupInformation.isSecurityEnabled()) {
String clientTokenEncoded = application.getClientToken();
Token clientToken =
new Token();
clientToken.decodeFromUrlString(clientTokenEncoded);
// RPC layer client expects ip:port as service for tokens
- InetSocketAddress addr = NetUtils.createSocketAddr(application
- .getHost(), application.getRpcPort());
- clientToken.setService(new Text(addr.getAddress().getHostAddress()
- + ":" + addr.getPort()));
+ SecurityUtil.setTokenService(clientToken, serviceAddr);
newUgi.addToken(clientToken);
}
LOG.debug("Connecting to " + serviceAddr);
- final String tempStr = serviceAddr;
+ final InetSocketAddress finalServiceAddr = serviceAddr;
      realProxy = newUgi.doAs(new PrivilegedExceptionAction<MRClientProtocol>() {
@Override
public MRClientProtocol run() throws IOException {
- return instantiateAMProxy(tempStr);
+ return instantiateAMProxy(finalServiceAddr);
}
});
} else {
@@ -270,13 +268,13 @@ private MRClientProtocol checkAndGetHSProxy(
return historyServerProxy;
}
- MRClientProtocol instantiateAMProxy(final String serviceAddr)
+ MRClientProtocol instantiateAMProxy(final InetSocketAddress serviceAddr)
throws IOException {
LOG.trace("Connecting to ApplicationMaster at: " + serviceAddr);
YarnRPC rpc = YarnRPC.create(conf);
MRClientProtocol proxy =
(MRClientProtocol) rpc.getProxy(MRClientProtocol.class,
- NetUtils.createSocketAddr(serviceAddr), conf);
+ serviceAddr, conf);
LOG.trace("Connected to ApplicationMaster at: " + serviceAddr);
return proxy;
}
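Note on the ClientServiceDelegate change above: the AM address is now carried as an InetSocketAddress and SecurityUtil.setTokenService derives the token's ip:port service string from it, instead of each call site concatenating host and port by hand. A minimal sketch of the kind of service-string derivation this relies on; serviceFor is an illustrative helper, not the SecurityUtil implementation:

import java.net.InetSocketAddress;

public class TokenServiceSketch {
  // The RPC layer keys tokens by "ip:port"; build that string once from a
  // resolved socket address instead of concatenating pieces ad hoc.
  static String serviceFor(InetSocketAddress addr) {
    InetSocketAddress resolved = addr.isUnresolved()
        ? new InetSocketAddress(addr.getHostName(), addr.getPort())
        : addr;
    if (resolved.getAddress() == null) {
      throw new IllegalArgumentException("cannot resolve " + addr);
    }
    return resolved.getAddress().getHostAddress() + ":" + resolved.getPort();
  }

  public static void main(String[] args) {
    System.out.println(serviceFor(new InetSocketAddress("localhost", 45454)));
  }
}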
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
index 25069cccf1e..3d00e8af8c9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.mapred;
+import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.HashMap;
@@ -209,4 +210,10 @@ public GetDelegationTokenResponse getDelegationToken(
/* Should not be invoked by anyone. */
throw new NotImplementedException();
}
+
+ @Override
+ public InetSocketAddress getConnectAddress() {
+ /* Should not be invoked by anyone. Normally used to set token service */
+ throw new NotImplementedException();
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
index 79a1d27c2db..62b608aca47 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
@@ -37,8 +37,6 @@
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
-import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -67,14 +65,14 @@
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
+import org.apache.hadoop.yarn.util.ProtoUtils;
// TODO: This should be part of something like yarn-client.
public class ResourceMgrDelegate {
private static final Log LOG = LogFactory.getLog(ResourceMgrDelegate.class);
- private final String rmAddress;
+ private final InetSocketAddress rmAddress;
private YarnConfiguration conf;
ClientRMProtocol applicationsManager;
private ApplicationId applicationId;
@@ -87,11 +85,7 @@ public class ResourceMgrDelegate {
public ResourceMgrDelegate(YarnConfiguration conf) {
this.conf = conf;
YarnRPC rpc = YarnRPC.create(this.conf);
- InetSocketAddress rmAddress = conf.getSocketAddr(
- YarnConfiguration.RM_ADDRESS,
- YarnConfiguration.DEFAULT_RM_ADDRESS,
- YarnConfiguration.DEFAULT_RM_PORT);
- this.rmAddress = rmAddress.toString();
+ this.rmAddress = getRmAddress(conf);
LOG.debug("Connecting to ResourceManager at " + rmAddress);
applicationsManager =
(ClientRMProtocol) rpc.getProxy(ClientRMProtocol.class,
@@ -109,7 +103,13 @@ public ResourceMgrDelegate(YarnConfiguration conf,
ClientRMProtocol applicationsManager) {
this.conf = conf;
this.applicationsManager = applicationsManager;
- this.rmAddress = applicationsManager.toString();
+ this.rmAddress = getRmAddress(conf);
+ }
+
+ private static InetSocketAddress getRmAddress(YarnConfiguration conf) {
+ return conf.getSocketAddr(YarnConfiguration.RM_ADDRESS,
+ YarnConfiguration.DEFAULT_RM_ADDRESS,
+ YarnConfiguration.DEFAULT_RM_PORT);
}
  public void cancelDelegationToken(Token<DelegationTokenIdentifier> arg0)
@@ -168,9 +168,7 @@ public Token<DelegationTokenIdentifier> getDelegationToken(Text renewer)
org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse
response = applicationsManager.getDelegationToken(rmDTRequest);
DelegationToken yarnToken = response.getRMDelegationToken();
-    return new Token<RMDelegationTokenIdentifier>(yarnToken.getIdentifier().array(),
- yarnToken.getPassword().array(),
- new Text(yarnToken.getKind()), new Text(yarnToken.getService()));
+ return ProtoUtils.convertFromProtoFormat(yarnToken, rmAddress);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
index 3b00ddf83c5..e6358de35de 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
@@ -56,7 +56,6 @@
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
-import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
@@ -84,6 +83,7 @@
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.ProtoUtils;
/**
@@ -184,7 +184,7 @@ public ClusterMetrics getClusterMetrics() throws IOException,
return resMgrDelegate.getClusterMetrics();
}
-  private Token<MRDelegationTokenIdentifier> getDelegationTokenFromHS(
+  private Token<?> getDelegationTokenFromHS(
MRClientProtocol hsProxy, Text renewer) throws IOException,
InterruptedException {
GetDelegationTokenRequest request = recordFactory
@@ -192,10 +192,8 @@ private Token<MRDelegationTokenIdentifier> getDelegationTokenFromHS(
request.setRenewer(renewer.toString());
DelegationToken mrDelegationToken = hsProxy.getDelegationToken(request)
.getDelegationToken();
-    return new Token<MRDelegationTokenIdentifier>(mrDelegationToken
- .getIdentifier().array(), mrDelegationToken.getPassword().array(),
- new Text(mrDelegationToken.getKind()), new Text(
- mrDelegationToken.getService()));
+ return ProtoUtils.convertFromProtoFormat(mrDelegationToken,
+ hsProxy.getConnectAddress());
}
@Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
index b51166a11c5..095d3fd9301 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientRedirect.java
@@ -368,6 +368,11 @@ public AMService() {
this(AMHOSTADDRESS);
}
+ @Override
+ public InetSocketAddress getConnectAddress() {
+ return bindAddress;
+ }
+
public AMService(String hostAddress) {
super("AMService");
this.protocol = MRClientProtocol.class;
@@ -390,9 +395,7 @@ public void start(Configuration conf) {
rpc.getServer(protocol, this, address,
conf, null, 1);
server.start();
- this.bindAddress =
- NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
- + ":" + server.getPort());
+ this.bindAddress = NetUtils.getConnectAddress(server);
super.start();
amRunning = true;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
index 55cfeeb9442..a3940054c59 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
@@ -27,6 +27,7 @@
import static org.mockito.Mockito.when;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.util.Arrays;
import java.util.Collection;
@@ -242,7 +243,7 @@ public void testReconnectOnAMRestart() throws IOException {
// should use the same proxy to AM2 and so instantiateProxy shouldn't be
// called.
doReturn(firstGenAMProxy).doReturn(secondGenAMProxy).when(
- clientServiceDelegate).instantiateAMProxy(any(String.class));
+ clientServiceDelegate).instantiateAMProxy(any(InetSocketAddress.class));
JobStatus jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
@@ -257,7 +258,7 @@ public void testReconnectOnAMRestart() throws IOException {
Assert.assertEquals("jobName-secondGen", jobStatus.getJobName());
verify(clientServiceDelegate, times(2)).instantiateAMProxy(
- any(String.class));
+ any(InetSocketAddress.class));
}
@Test
@@ -286,19 +287,19 @@ public void testAMAccessDisabled() throws IOException {
Assert.assertEquals("N/A", jobStatus.getJobName());
verify(clientServiceDelegate, times(0)).instantiateAMProxy(
- any(String.class));
+ any(InetSocketAddress.class));
// Should not reach AM even for second and third times too.
jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("N/A", jobStatus.getJobName());
verify(clientServiceDelegate, times(0)).instantiateAMProxy(
- any(String.class));
+ any(InetSocketAddress.class));
jobStatus = clientServiceDelegate.getJobStatus(oldJobId);
Assert.assertNotNull(jobStatus);
Assert.assertEquals("N/A", jobStatus.getJobName());
verify(clientServiceDelegate, times(0)).instantiateAMProxy(
- any(String.class));
+ any(InetSocketAddress.class));
// The third time around, app is completed, so should go to JHS
JobStatus jobStatus1 = clientServiceDelegate.getJobStatus(oldJobId);
@@ -309,7 +310,7 @@ public void testAMAccessDisabled() throws IOException {
Assert.assertEquals(1.0f, jobStatus1.getReduceProgress());
verify(clientServiceDelegate, times(0)).instantiateAMProxy(
- any(String.class));
+ any(InetSocketAddress.class));
}
@Test
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
index 20c00b1da08..792806b624c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
@@ -26,11 +26,9 @@
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.v2.api.HSClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
-import org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
import org.apache.hadoop.mapreduce.v2.hs.JobHistoryServer;
@@ -38,11 +36,11 @@
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
+import org.apache.hadoop.yarn.util.ProtoUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
@@ -95,9 +93,8 @@ public DelegationToken run() throws YarnRemoteException {
// Now try talking to JHS using the delegation token
UserGroupInformation ugi =
UserGroupInformation.createRemoteUser("TheDarkLord");
-    ugi.addToken(new Token<MRDelegationTokenIdentifier>(token.getIdentifier()
- .array(), token.getPassword().array(), new Text(token.getKind()),
- new Text(token.getService())));
+ ugi.addToken(ProtoUtils.convertFromProtoFormat(
+ token, jobHistoryServer.getClientService().getBindAddress()));
final YarnRPC rpc = YarnRPC.create(conf);
MRClientProtocol userUsingDT =
      ugi.doAs(new PrivilegedAction<MRClientProtocol>() {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
index dd4b3489750..8167102ab86 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestUmbilicalProtocolWithJobToken.java
@@ -47,6 +47,7 @@
import org.apache.hadoop.security.SaslInputStream;
import org.apache.hadoop.security.SaslRpcClient;
import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.log4j.Level;
@@ -98,10 +99,8 @@ public void testJobTokenRpc() throws Exception {
JobTokenIdentifier tokenId = new JobTokenIdentifier(new Text(jobId));
    Token<JobTokenIdentifier> token = new Token<JobTokenIdentifier>(tokenId, sm);
sm.addTokenForJob(jobId, token);
- Text host = new Text(addr.getAddress().getHostAddress() + ":"
- + addr.getPort());
- token.setService(host);
- LOG.info("Service IP address for token is " + host);
+ SecurityUtil.setTokenService(token, addr);
+ LOG.info("Service address for token is " + token.getService());
current.addToken(token);
current.doAs(new PrivilegedExceptionAction