From 75c31df9dd1d9435aec4cc245e1e58ca326e6159 Mon Sep 17 00:00:00 2001
From: Thomas Graves
Date: Tue, 10 Sep 2013 18:31:50 +0000
Subject: [PATCH 01/16] MAPREDUCE-5020. Compile failure with JDK8 (Trevor Robinson via tgraves)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521576 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-mapreduce-project/CHANGES.txt | 2 ++
 .../org/apache/hadoop/mapreduce/lib/partition/InputSampler.java | 2 +-
 2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index 13f0079fc3c..24e7da1a7ba 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -256,6 +256,8 @@ Release 2.1.1-beta - UNRELEASED
     MAPREDUCE-5414. TestTaskAttempt fails in JDK7 with NPE (Nemon Lou via devaraj)
 
+    MAPREDUCE-5020. Compile failure with JDK8 (Trevor Robinson via tgraves)
+
 Release 2.1.0-beta - 2013-08-22
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
index 742316875ce..e709a2823aa 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java
@@ -317,7 +317,7 @@ public class InputSampler extends Configured implements Tool {
     final InputFormat inf = ReflectionUtils.newInstance(job.getInputFormatClass(), conf);
     int numPartitions = job.getNumReduceTasks();
-    K[] samples = sampler.getSample(inf, job);
+    K[] samples = (K[])sampler.getSample(inf, job);
     LOG.info("Using " + samples.length + " samples");
     RawComparator comparator = (RawComparator) job.getSortComparator();

From 613979c8fdacf25fd563395ecc399c4de94d3ee7 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Tue, 10 Sep 2013 19:29:45 +0000
Subject: [PATCH 02/16] HDFS-5085. Refactor o.a.h.nfs to support different types of authentications. Contributed by Jing Zhao.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521601 13f79535-47bb-0310-9956-ffa450edef68
---
 .../apache/hadoop/mount/MountResponse.java | 4 +-
 .../nfs/{security => }/AccessPrivilege.java | 2 +-
 .../hadoop/nfs/{security => }/NfsExports.java | 2 +-
 .../apache/hadoop/nfs/nfs3/IdUserGroup.java | 4 +-
 .../apache/hadoop/nfs/nfs3/Nfs3Constant.java | 3 +
 .../apache/hadoop/nfs/nfs3/Nfs3Interface.java | 104 +++++----
 .../hadoop/oncrpc/RpcAcceptedReply.java | 6 +-
 .../org/apache/hadoop/oncrpc/RpcCall.java | 21 +-
 .../apache/hadoop/oncrpc/RpcDeniedReply.java | 2 +-
 .../java/org/apache/hadoop/oncrpc/XDR.java | 2 +-
 .../hadoop/oncrpc/security/Credentials.java | 53 +++++
 .../CredentialsGSS.java} | 42 ++--
 .../oncrpc/security/CredentialsNone.java | 43 ++++
 .../oncrpc/security/CredentialsSys.java | 114 +++++++++
 .../oncrpc/{ => security}/RpcAuthInfo.java | 27 +--
 .../oncrpc/security/SecurityHandler.java | 63 +++++
 .../oncrpc/security/SysSecurityHandler.java | 59 +++++
 .../hadoop/oncrpc/security/Verifier.java | 49 ++++
 .../hadoop/oncrpc/security/VerifierGSS.java | 41 ++++
 .../hadoop/oncrpc/security/VerifierNone.java | 41 ++++
 .../apache/hadoop/portmap/PortmapRequest.java | 14 +-
 .../nfs/{security => }/TestNfsExports.java | 4 +-
 .../hadoop/oncrpc/TestRpcAcceptedReply.java | 5 +-
 .../org/apache/hadoop/oncrpc/TestRpcCall.java | 10 +-
 .../TestCredentialsSys.java} | 37 +--
 .../{ => security}/TestRpcAuthInfo.java | 16 +-
 .../hdfs/nfs/mount/RpcProgramMountd.java | 4 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 217 +++++++++---------
 .../apache/hadoop/hdfs/nfs/TestMountd.java | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +
 30 files changed, 739 insertions(+), 255 deletions(-)
 rename hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/{security => }/AccessPrivilege.java (95%)
 rename hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/{security => }/NfsExports.java (99%)
 create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java
 rename hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/{RpcAuthSys.java => security/CredentialsGSS.java} (56%)
 create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java
 create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java
 rename hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/{ => security}/RpcAuthInfo.java (75%)
 create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java
 create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java
 create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java
 create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java
 create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java
 rename hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/{security => }/TestNfsExports.java (98%)
 rename hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/{TestRpcAuthSys.java => security/TestCredentialsSys.java} (58%)
 rename hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/{ => security}/TestRpcAuthInfo.java (78%)

diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java index 3839acc1966..a9131e3b509 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java @@ -19,10 +19,10 @@ package org.apache.hadoop.mount; import java.util.List; -import org.apache.hadoop.nfs.security.NfsExports; +import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.XDR; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; /** * Helper class for sending MountResponse diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/AccessPrivilege.java similarity index 95% rename from hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/AccessPrivilege.java index 8789ecfb4e3..073d08d567a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/AccessPrivilege.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.nfs.security; +package org.apache.hadoop.nfs; public enum AccessPrivilege { READ_ONLY, diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java similarity index 99% rename from hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java index 301f2f0ff72..afe630b9b03 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.nfs.security; +package org.apache.hadoop.nfs; import java.net.InetAddress; import java.util.ArrayList; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java index b5c5aa3789d..c0be5dd2890 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java @@ -147,7 +147,7 @@ public class IdUserGroup { synchronized public String getUserName(int uid, String unknown) { checkAndUpdateMaps(); - String uname = uidNameMap.get(Integer.valueOf(uid)); + String uname = uidNameMap.get(uid); if (uname == null) { uname = unknown; } @@ -156,7 +156,7 @@ public class IdUserGroup { synchronized public String getGroupName(int gid, String unknown) { checkAndUpdateMaps(); - String gname = gidNameMap.get(Integer.valueOf(gid)); + String gname = gidNameMap.get(gid); if (gname == null) { gname = unknown; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java index 706c99f47c4..bedb58f0c4a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java @@ -205,4 +205,7 @@ public class Nfs3Constant { public static final String FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs"; public static final String ENABLE_FILE_DUMP_KEY = "dfs.nfs3.enableDump"; public static final boolean ENABLE_FILE_DUMP_DEFAULT = true; + + public final static String UNKNOWN_USER = "nobody"; + public final static String UNKNOWN_GROUP = "nobody"; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java index 678631174dd..8486e2ae50e 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java @@ -20,67 +20,83 @@ package org.apache.hadoop.nfs.nfs3; import java.net.InetAddress; import org.apache.hadoop.nfs.nfs3.response.NFS3Response; -import org.apache.hadoop.oncrpc.RpcAuthSys; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.SecurityHandler; import org.jboss.netty.channel.Channel; /** * RPC procedures as defined in RFC 1813. 
*/ public interface Nfs3Interface { - + /** NULL: Do nothing */ public NFS3Response nullProcedure(); - + /** GETATTR: Get file attributes */ - public NFS3Response getattr(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response getattr(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** SETATTR: Set file attributes */ - public NFS3Response setattr(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response setattr(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** LOOKUP: Lookup filename */ - public NFS3Response lookup(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** ACCESS: Check access permission */ - public NFS3Response access(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response lookup(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** ACCESS: Check access permission */ + public NFS3Response access(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** READ: Read from file */ - public NFS3Response read(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response read(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** WRITE: Write to file */ public NFS3Response write(XDR xdr, Channel channel, int xid, - RpcAuthSys authSys, InetAddress client); - - /** CREATE: Create a file */ - public NFS3Response create(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** MKDIR: Create a directory */ - public NFS3Response mkdir(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** REMOVE: Remove a file */ - public NFS3Response remove(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** RMDIR: Remove a directory */ - public NFS3Response rmdir(XDR xdr, RpcAuthSys authSys, InetAddress client); - + SecurityHandler securityHandler, InetAddress client); + + /** CREATE: Create a file */ + public NFS3Response create(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** MKDIR: Create a directory */ + public NFS3Response mkdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** REMOVE: Remove a file */ + public NFS3Response remove(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** RMDIR: Remove a directory */ + public NFS3Response rmdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** RENAME: Rename a file or directory */ - public NFS3Response rename(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** SYMLINK: Create a symbolic link */ - public NFS3Response symlink(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response rename(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** SYMLINK: Create a symbolic link */ + public NFS3Response symlink(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** READDIR: Read From directory */ - public NFS3Response readdir(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** FSSTAT: Get dynamic file system information */ - public NFS3Response fsstat(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response readdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** FSSTAT: Get dynamic file system information */ + public NFS3Response fsstat(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** FSINFO: Get static file system information */ - public NFS3Response fsinfo(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response fsinfo(XDR xdr, SecurityHandler securityHandler, + 
InetAddress client); + /** PATHCONF: Retrieve POSIX information */ - public NFS3Response pathconf(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** COMMIT: Commit cached data on a server to stable storage */ - public NFS3Response commit(XDR xdr, RpcAuthSys authSys, InetAddress client); + public NFS3Response pathconf(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** COMMIT: Commit cached data on a server to stable storage */ + public NFS3Response commit(XDR xdr, SecurityHandler securityHandler, + InetAddress client); } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java index b6bd5e7c7b9..c909c283d7d 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.oncrpc; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; /** * Represents RPC message MSG_ACCEPTED reply body. See RFC 1831 for details. @@ -54,7 +56,7 @@ public class RpcAcceptedReply extends RpcReply { public static RpcAcceptedReply read(int xid, RpcMessage.Type messageType, ReplyState replyState, XDR xdr) { - RpcAuthInfo verifier = RpcAuthInfo.read(xdr); + Verifier verifier = Verifier.readFlavorAndVerifier(xdr); AcceptState acceptState = AcceptState.fromValue(xdr.readInt()); return new RpcAcceptedReply(xid, messageType, replyState, verifier, acceptState); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java index ed0f111602c..4c872da8557 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java @@ -19,6 +19,8 @@ package org.apache.hadoop.oncrpc; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.Verifier; /** * Represents an RPC message of type RPC call as defined in RFC 1831 @@ -30,11 +32,12 @@ public class RpcCall extends RpcMessage { private final int program; private final int version; private final int procedure; - private final RpcAuthInfo credential; - private final RpcAuthInfo verifier; + private final Credentials credential; + private final Verifier verifier; - protected RpcCall(int xid, RpcMessage.Type messageType, int rpcVersion, int program, - int version, int procedure, RpcAuthInfo credential, RpcAuthInfo verifier) { + protected RpcCall(int xid, RpcMessage.Type messageType, int rpcVersion, + int program, int version, int procedure, Credentials credential, + Verifier verifier) { super(xid, messageType); this.rpcVersion = rpcVersion; this.program = program; @@ -79,19 +82,19 @@ public class RpcCall extends RpcMessage { return procedure; } - public RpcAuthInfo getCredential() { + public Credentials getCredential() { return credential; } - public RpcAuthInfo getVerifier() { + public Verifier getVerifier() { return verifier; } public static RpcCall read(XDR xdr) { return new 
RpcCall(xdr.readInt(), RpcMessage.Type.fromValue(xdr.readInt()), - xdr.readInt(), xdr.readInt(), - xdr.readInt(), xdr.readInt(), RpcAuthInfo.read(xdr), - RpcAuthInfo.read(xdr)); + xdr.readInt(), xdr.readInt(), xdr.readInt(), xdr.readInt(), + Credentials.readFlavorAndCredentials(xdr), + Verifier.readFlavorAndVerifier(xdr)); } public static void write(XDR out, int xid, int program, int progVersion, diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java index 4b91edb397f..8a9af096d1f 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.oncrpc; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; /** * Represents RPC message MSG_DENIED reply body. See RFC 1831 for details. diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java index 40633e286d8..c2f3d06b996 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java @@ -280,7 +280,7 @@ public class XDR { public byte[] readVariableOpaque() { int size = this.readInt(); - return size != 0 ? this.readFixedOpaque(size) : null; + return size != 0 ? this.readFixedOpaque(size) : new byte[0]; } public void skipVariableOpaque() { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java new file mode 100644 index 00000000000..8f641d885aa --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.oncrpc.XDR; + +/** + * Base class for all credentials. Currently we only support 3 different types + * of auth flavors: AUTH_NONE, AUTH_SYS, and RPCSEC_GSS. 
+ */ +public abstract class Credentials extends RpcAuthInfo { + public static final Log LOG = LogFactory.getLog(Credentials.class); + + public static Credentials readFlavorAndCredentials(XDR xdr) { + AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt()); + final Credentials credentials; + if(flavor == AuthFlavor.AUTH_NONE) { + credentials = new CredentialsNone(); + } else if(flavor == AuthFlavor.AUTH_SYS) { + credentials = new CredentialsSys(); + } else if(flavor == AuthFlavor.RPCSEC_GSS) { + credentials = new CredentialsGSS(); + } else { + throw new UnsupportedOperationException("Unsupported Credentials Flavor " + + flavor); + } + credentials.read(xdr); + return credentials; + } + + protected int mCredentialsLength; + + protected Credentials(AuthFlavor flavor) { + super(flavor); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthSys.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsGSS.java similarity index 56% rename from hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthSys.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsGSS.java index dbedb3649e4..0dd80dc0895 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthSys.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsGSS.java @@ -15,37 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.oncrpc; +package org.apache.hadoop.oncrpc.security; -/** - * AUTH_SYS as defined in RFC 1831 - */ -public class RpcAuthSys { - private final int uid; - private final int gid; +import org.apache.hadoop.oncrpc.XDR; - public RpcAuthSys(int uid, int gid) { - this.uid = uid; - this.gid = gid; - } - - public static RpcAuthSys from(byte[] credentials) { - XDR sys = new XDR(credentials); - sys.skip(4); // Stamp - sys.skipVariableOpaque(); // Machine name - return new RpcAuthSys(sys.readInt(), sys.readInt()); - } - - public int getUid() { - return uid; - } +/** Credential used by RPCSEC_GSS */ +public class CredentialsGSS extends Credentials { - public int getGid() { - return gid; + public CredentialsGSS() { + super(AuthFlavor.RPCSEC_GSS); } @Override - public String toString() { - return "(AuthSys: uid=" + uid + " gid=" + gid + ")"; + public void read(XDR xdr) { + // TODO Auto-generated method stub + } + + @Override + public void write(XDR xdr) { + // TODO Auto-generated method stub + + } + } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java new file mode 100644 index 00000000000..753edba49fb --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.oncrpc.XDR; + +import com.google.common.base.Preconditions; + +/** Credential used by AUTH_NONE */ +public class CredentialsNone extends Credentials { + + public CredentialsNone() { + super(AuthFlavor.AUTH_NONE); + mCredentialsLength = 0; + } + + @Override + public void read(XDR xdr) { + mCredentialsLength = xdr.readInt(); + Preconditions.checkState(mCredentialsLength == 0); + } + + @Override + public void write(XDR xdr) { + Preconditions.checkState(mCredentialsLength == 0); + xdr.writeInt(mCredentialsLength); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java new file mode 100644 index 00000000000..9ba12b8990e --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.oncrpc.security; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.apache.hadoop.oncrpc.XDR; + +/** Credential used by AUTH_SYS */ +public class CredentialsSys extends Credentials { + + private static final String HOSTNAME; + static { + try { + String s = InetAddress.getLocalHost().getHostName(); + HOSTNAME = s; + if(LOG.isDebugEnabled()) { + LOG.debug("HOSTNAME = " + HOSTNAME); + } + } catch (UnknownHostException e) { + LOG.error("Error setting HOSTNAME", e); + throw new RuntimeException(e); + } + } + + protected int mUID, mGID; + protected int[] mAuxGIDs; + protected String mHostName; + protected int mStamp; + + public CredentialsSys() { + super(AuthFlavor.AUTH_SYS); + this.mCredentialsLength = 0; + this.mHostName = HOSTNAME; + } + + public int getGID() { + return mGID; + } + + public int getUID() { + return mUID; + } + + public void setGID(int gid) { + this.mGID = gid; + } + + public void setUID(int uid) { + this.mUID = uid; + } + + public void setStamp(int stamp) { + this.mStamp = stamp; + } + + @Override + public void read(XDR xdr) { + mCredentialsLength = xdr.readInt(); + + mStamp = xdr.readInt(); + mHostName = xdr.readString(); + mUID = xdr.readInt(); + mGID = xdr.readInt(); + + int length = xdr.readInt(); + mAuxGIDs = new int[length]; + for (int i = 0; i < length; i++) { + mAuxGIDs[i] = xdr.readInt(); + } + } + + @Override + public void write(XDR xdr) { + // mStamp + mHostName.length + mHostName + mUID + mGID + mAuxGIDs.count + mCredentialsLength = 20 + mHostName.getBytes().length; + // mAuxGIDs + if (mAuxGIDs != null && mAuxGIDs.length > 0) { + mCredentialsLength += mAuxGIDs.length * 4; + } + xdr.writeInt(mCredentialsLength); + + xdr.writeInt(mStamp); + xdr.writeString(mHostName); + xdr.writeInt(mUID); + xdr.writeInt(mGID); + + if((mAuxGIDs == null) || (mAuxGIDs.length == 0)) { + xdr.writeInt(0); + } else { + xdr.writeInt(mAuxGIDs.length); + for (int i = 0; i < mAuxGIDs.length; i++) { + xdr.writeInt(mAuxGIDs[i]); + } + } + } + +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthInfo.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/RpcAuthInfo.java similarity index 75% rename from hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthInfo.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/RpcAuthInfo.java index a507d0d20de..65ebd304a25 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthInfo.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/RpcAuthInfo.java @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.oncrpc; +package org.apache.hadoop.oncrpc.security; -import java.util.Arrays; +import org.apache.hadoop.oncrpc.XDR; /** - * Authentication Info as defined in RFC 1831 + * Authentication Info. Base class of Verifier and Credential. 
*/ -public class RpcAuthInfo { +public abstract class RpcAuthInfo { /** Different types of authentication as defined in RFC 1831 */ public enum AuthFlavor { AUTH_NONE(0), @@ -52,27 +52,20 @@ public class RpcAuthInfo { } private final AuthFlavor flavor; - private final byte[] body; - protected RpcAuthInfo(AuthFlavor flavor, byte[] body) { + protected RpcAuthInfo(AuthFlavor flavor) { this.flavor = flavor; - this.body = body; } - public static RpcAuthInfo read(XDR xdr) { - int type = xdr.readInt(); - AuthFlavor flavor = AuthFlavor.fromValue(type); - byte[] body = xdr.readVariableOpaque(); - return new RpcAuthInfo(flavor, body); - } + /** Load auth info */ + public abstract void read(XDR xdr); + + /** Write auth info */ + public abstract void write(XDR xdr); public AuthFlavor getFlavor() { return flavor; } - - public byte[] getBody() { - return Arrays.copyOf(body, body.length); - } @Override public String toString() { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java new file mode 100644 index 00000000000..40004d0e786 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.oncrpc.security; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.oncrpc.RpcCall; +import org.apache.hadoop.oncrpc.XDR; + +public abstract class SecurityHandler { + public static final Log LOG = LogFactory.getLog(SecurityHandler.class); + + public abstract String getUser(); + + public abstract boolean shouldSilentlyDrop(RpcCall request); + + public abstract Verifier getVerifer(RpcCall request) throws IOException; + + public boolean isUnwrapRequired() { + return false; + } + + public boolean isWrapRequired() { + return false; + } + + /** Used by GSS */ + public XDR unwrap(RpcCall request, byte[] data ) throws IOException { + throw new UnsupportedOperationException(); + } + + /** Used by GSS */ + public byte[] wrap(RpcCall request, XDR response) throws IOException { + throw new UnsupportedOperationException(); + } + + /** Used by AUTH_SYS */ + public int getUid() { + throw new UnsupportedOperationException(); + } + + /** Used by AUTH_SYS */ + public int getGid() { + throw new UnsupportedOperationException(); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java new file mode 100644 index 00000000000..196d3d8cbba --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.nfs.nfs3.IdUserGroup; +import org.apache.hadoop.nfs.nfs3.Nfs3Constant; +import org.apache.hadoop.oncrpc.RpcCall; + +public class SysSecurityHandler extends SecurityHandler { + + private final IdUserGroup iug; + private final CredentialsSys mCredentialsSys; + + public SysSecurityHandler(CredentialsSys credentialsSys, + IdUserGroup iug) { + this.mCredentialsSys = credentialsSys; + this.iug = iug; + } + + @Override + public String getUser() { + return iug.getUserName(mCredentialsSys.getUID(), Nfs3Constant.UNKNOWN_USER); + } + + @Override + public boolean shouldSilentlyDrop(RpcCall request) { + return false; + } + + @Override + public VerifierNone getVerifer(RpcCall request) { + return new VerifierNone(); + } + + @Override + public int getUid() { + return mCredentialsSys.getUID(); + } + + @Override + public int getGid() { + return mCredentialsSys.getGID(); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java new file mode 100644 index 00000000000..f8344b2800d --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; + +/** + * Base class for verifier. Currently we only support 3 types of auth flavors: + * {@link AuthFlavor#AUTH_NONE}, {@link AuthFlavor#AUTH_SYS}, + * and {@link AuthFlavor#RPCSEC_GSS}. 
+ */ +public abstract class Verifier extends RpcAuthInfo { + + protected Verifier(AuthFlavor flavor) { + super(flavor); + } + + public static Verifier readFlavorAndVerifier(XDR xdr) { + AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt()); + final Verifier verifer; + if(flavor == AuthFlavor.AUTH_NONE) { + verifer = new VerifierNone(); + } else if(flavor == AuthFlavor.RPCSEC_GSS) { + verifer = new VerifierGSS(); + } else { + throw new UnsupportedOperationException("Unsupported verifier flavor" + + flavor); + } + verifer.read(xdr); + return verifer; + } + +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java new file mode 100644 index 00000000000..7410070b127 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.oncrpc.XDR; + +/** Verifier mapped to RPCSEC_GSS. */ +public class VerifierGSS extends Verifier { + + public VerifierGSS() { + super(AuthFlavor.RPCSEC_GSS); + } + + @Override + public void read(XDR xdr) { + // TODO Auto-generated method stub + + } + + @Override + public void write(XDR xdr) { + // TODO Auto-generated method stub + + } + +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java new file mode 100644 index 00000000000..8bccd1b9be2 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.oncrpc.XDR; + +import com.google.common.base.Preconditions; + +/** Verifier used by AUTH_NONE. */ +public class VerifierNone extends Verifier { + + public VerifierNone() { + super(AuthFlavor.AUTH_NONE); + } + + @Override + public void read(XDR xdr) { + int length = xdr.readInt(); + Preconditions.checkState(length == 0); + } + + @Override + public void write(XDR xdr) { + xdr.writeInt(0); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java index 11da7d44dc2..98dd1ffa635 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.portmap; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcUtil; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsNone; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.VerifierNone; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.portmap.PortmapInterface.Procedure; /** @@ -38,9 +42,11 @@ public class PortmapRequest { RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, Procedure.PMAPPROC_SET.getValue()); request.writeInt(AuthFlavor.AUTH_NONE.getValue()); - request.writeInt(0); - request.writeInt(0); - request.writeInt(0); + Credentials credential = new CredentialsNone(); + credential.write(request); + request.writeInt(AuthFlavor.AUTH_NONE.getValue()); + Verifier verifier = new VerifierNone(); + verifier.write(request); return mapping.serialize(request); } } diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java similarity index 98% rename from hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java rename to hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java index dbadd8ba339..9acb29e589a 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java @@ -15,10 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.nfs.security; +package org.apache.hadoop.nfs; import junit.framework.Assert; +import org.apache.hadoop.nfs.AccessPrivilege; +import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java index fca0ff5a7f8..2daa48cce83 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java @@ -20,8 +20,9 @@ package org.apache.hadoop.oncrpc; import static org.junit.Assert.assertEquals; import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.oncrpc.RpcReply.ReplyState; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.junit.Test; /** @@ -45,7 +46,7 @@ public class TestRpcAcceptedReply { @Test public void testConstructor() { - RpcAuthInfo verifier = new RpcAuthInfo(AuthFlavor.AUTH_NONE, new byte[0]); + Verifier verifier = new VerifierNone(); RpcAcceptedReply reply = new RpcAcceptedReply(0, RpcMessage.Type.RPC_REPLY, ReplyState.MSG_ACCEPTED, verifier, AcceptState.SUCCESS); assertEquals(0, reply.getXid()); diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java index eb168034524..b35931776d5 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java @@ -17,8 +17,12 @@ */ package org.apache.hadoop.oncrpc; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.oncrpc.security.CredentialsNone; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.junit.Test; /** @@ -28,8 +32,8 @@ public class TestRpcCall { @Test public void testConstructor() { - RpcAuthInfo credential = new RpcAuthInfo(AuthFlavor.AUTH_NONE, new byte[0]); - RpcAuthInfo verifier = new RpcAuthInfo(AuthFlavor.AUTH_NONE, new byte[0]); + Credentials credential = new CredentialsNone(); + Verifier verifier = new VerifierNone(); int rpcVersion = RpcCall.RPC_VERSION; int program = 2; int version = 3; diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthSys.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java similarity index 58% rename from hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthSys.java rename to hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java index 474a1f73789..0fd183e782d 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthSys.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java @@ -15,31 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the 
License. */ -package org.apache.hadoop.oncrpc; +package org.apache.hadoop.oncrpc.security; import static org.junit.Assert.assertEquals; +import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsSys; import org.junit.Test; /** - * Test for {@link RpcAuthSys} + * Test for {@link CredentialsSys} */ -public class TestRpcAuthSys { +public class TestCredentialsSys { + @Test - public void testConstructor() { - RpcAuthSys auth = new RpcAuthSys(0, 1); - assertEquals(0, auth.getUid()); - assertEquals(1, auth.getGid()); - } - - @Test - public void testRead() { - byte[] bytes = {0, 1, 2, 3}; // 4 bytes Stamp - bytes = XDR.append(bytes, XDR.getVariableOpque(new byte[0])); - bytes = XDR.append(bytes, XDR.toBytes(0)); // gid - bytes = XDR.append(bytes, XDR.toBytes(1)); // uid - RpcAuthSys auth = RpcAuthSys.from(bytes); - assertEquals(0, auth.getUid()); - assertEquals(1, auth.getGid()); + public void testReadWrite() { + CredentialsSys credential = new CredentialsSys(); + credential.setUID(0); + credential.setGID(1); + + XDR xdr = new XDR(); + credential.write(xdr); + + CredentialsSys newCredential = new CredentialsSys(); + newCredential.read(xdr); + + assertEquals(0, newCredential.getUID()); + assertEquals(1, newCredential.getGID()); } } diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthInfo.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestRpcAuthInfo.java similarity index 78% rename from hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthInfo.java rename to hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestRpcAuthInfo.java index 0b8240bc605..755a6eb9497 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthInfo.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestRpcAuthInfo.java @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.oncrpc; +package org.apache.hadoop.oncrpc.security; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import java.util.Arrays; - -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; import org.junit.Test; /** @@ -42,12 +40,4 @@ public class TestRpcAuthInfo { public void testInvalidAuthFlavor() { assertEquals(AuthFlavor.AUTH_NONE, AuthFlavor.fromValue(4)); } - - @Test - public void testConsturctor() { - byte[] body = new byte[0]; - RpcAuthInfo auth = new RpcAuthInfo(AuthFlavor.AUTH_NONE, body); - assertEquals(AuthFlavor.AUTH_NONE, auth.getFlavor()); - assertTrue(Arrays.equals(body, auth.getBody())); - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java index 5b5ea511d19..1ff7f3f45b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java @@ -32,10 +32,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.mount.MountEntry; import org.apache.hadoop.mount.MountInterface; import org.apache.hadoop.mount.MountResponse; +import org.apache.hadoop.nfs.AccessPrivilege; +import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Status; -import org.apache.hadoop.nfs.security.AccessPrivilege; -import org.apache.hadoop.nfs.security.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcProgram; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 404cf3e73ca..b935119b6be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -42,6 +42,8 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.nfs.AccessPrivilege; +import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.NfsTime; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.IdUserGroup; @@ -96,16 +98,18 @@ import org.apache.hadoop.nfs.nfs3.response.VoidResponse; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccData; -import org.apache.hadoop.nfs.security.AccessPrivilege; -import org.apache.hadoop.nfs.security.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; -import org.apache.hadoop.oncrpc.RpcAuthSys; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcDeniedReply; import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcReply; import 
org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsSys; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.SecurityHandler; +import org.apache.hadoop.oncrpc.security.SysSecurityHandler; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.security.AccessControlException; import org.jboss.netty.channel.Channel; @@ -205,8 +209,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public GETATTR3Response getattr(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public GETATTR3Response getattr(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -214,8 +218,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -268,9 +271,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { if (updateFields.contains(SetAttrField.UID) || updateFields.contains(SetAttrField.GID)) { String uname = updateFields.contains(SetAttrField.UID) ? iug.getUserName( - newAttr.getUid(), UNKNOWN_USER) : null; + newAttr.getUid(), Nfs3Constant.UNKNOWN_USER) : null; String gname = updateFields.contains(SetAttrField.GID) ? iug - .getGroupName(newAttr.getGid(), UNKNOWN_GROUP) : null; + .getGroupName(newAttr.getGid(), Nfs3Constant.UNKNOWN_GROUP) : null; dfsClient.setOwner(fileIdPath, uname, gname); } @@ -287,11 +290,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public SETATTR3Response setattr(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public SETATTR3Response setattr(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -364,7 +366,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public LOOKUP3Response lookup(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public LOOKUP3Response lookup(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -372,8 +375,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -426,7 +428,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public ACCESS3Response access(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public ACCESS3Response access(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { 
ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -434,8 +437,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -464,8 +466,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.error("Can't get path for fileId:" + handle.getFileId()); return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE); } - int access = Nfs3Utils.getAccessRightsForUserGroup(authSys.getUid(), - authSys.getGid(), attrs); + int access = Nfs3Utils.getAccessRightsForUserGroup( + securityHandler.getUid(), securityHandler.getGid(), attrs); return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access); } catch (IOException e) { @@ -474,13 +476,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public READLINK3Response readlink(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public READLINK3Response readlink(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { return new READLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); } @Override - public READ3Response read(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public READ3Response read(XDR xdr, SecurityHandler securityHandler, + InetAddress client) { READ3Response response = new READ3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -488,8 +491,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -534,8 +536,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } return new READ3Response(Nfs3Status.NFS3ERR_NOENT); } - int access = Nfs3Utils.getAccessRightsForUserGroup(authSys.getUid(), - authSys.getGid(), attrs); + int access = Nfs3Utils.getAccessRightsForUserGroup( + securityHandler.getUid(), securityHandler.getGid(), attrs); if ((access & Nfs3Constant.ACCESS3_READ) != 0) { eof = offset < attrs.getSize() ? 
false : true; return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof, @@ -578,10 +580,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { @Override public WRITE3Response write(XDR xdr, Channel channel, int xid, - RpcAuthSys authSys, InetAddress client) { + SecurityHandler securityHandler, InetAddress client) { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -653,10 +655,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public CREATE3Response create(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public CREATE3Response create(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -725,7 +727,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { // Set group if it's not specified in the request. if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) { setAttr3.getUpdateFields().add(SetAttrField.GID); - setAttr3.setGid(authSys.getGid()); + setAttr3.setGid(securityHandler.getGid()); } setattrInternal(dfsClient, fileIdPath, setAttr3, false); } @@ -776,10 +778,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public MKDIR3Response mkdir(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client) { MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -834,7 +836,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { // Set group if it's not specified in the request. 
if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) { setAttr3.getUpdateFields().add(SetAttrField.GID); - setAttr3.setGid(authSys.getGid()); + setAttr3.setGid(securityHandler.getGid()); } setattrInternal(dfsClient, fileIdPath, setAttr3, false); @@ -866,15 +868,16 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public READDIR3Response mknod(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public READDIR3Response mknod(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } @Override - public REMOVE3Response remove(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public REMOVE3Response remove(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -947,10 +950,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public RMDIR3Response rmdir(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client) { RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1030,10 +1033,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public RENAME3Response rename(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public RENAME3Response rename(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1118,18 +1121,18 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public SYMLINK3Response symlink(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public SYMLINK3Response symlink(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { return new SYMLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); } - public READDIR3Response link(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public READDIR3Response link(XDR xdr, SecurityHandler securityHandler, InetAddress client) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } @Override - public READDIR3Response readdir(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public READDIR3Response readdir(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -1137,8 +1140,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { 
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1269,14 +1271,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { dirStatus.getModificationTime(), dirList); } - public READDIRPLUS3Response readdirplus(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public READDIRPLUS3Response readdirplus(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES); } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT); } @@ -1420,7 +1421,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public FSSTAT3Response fsstat(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public FSSTAT3Response fsstat(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -1428,8 +1430,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1478,7 +1479,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public FSINFO3Response fsinfo(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public FSINFO3Response fsinfo(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -1486,8 +1488,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1530,8 +1531,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public PATHCONF3Response pathconf(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public PATHCONF3Response pathconf(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -1539,8 +1540,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1578,10 +1578,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public COMMIT3Response commit(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public COMMIT3Response commit(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK); - String uname = 
authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1645,12 +1645,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { Nfs3Constant.WRITE_COMMIT_VERF); } } - - private final static String UNKNOWN_USER = "nobody"; - private final static String UNKNOWN_GROUP = "nobody"; - private String authSysCheck(RpcAuthSys authSys) { - return iug.getUserName(authSys.getUid(), UNKNOWN_USER); + private SecurityHandler getSecurityHandler(Credentials credentials, + Verifier verifier) { + if (credentials instanceof CredentialsSys) { + return new SysSecurityHandler((CredentialsSys) credentials, iug); + } else { + // TODO: support GSS and handle other cases + return null; + } } @Override @@ -1658,67 +1661,71 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { InetAddress client, Channel channel) { final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure()); int xid = rpcCall.getXid(); - RpcAuthSys authSys = null; - + + Credentials credentials = rpcCall.getCredential(); // Ignore auth only for NFSPROC3_NULL, especially for Linux clients. if (nfsproc3 != NFSPROC3.NULL) { - if (rpcCall.getCredential().getFlavor() != AuthFlavor.AUTH_SYS) { + if (rpcCall.getCredential().getFlavor() != AuthFlavor.AUTH_SYS + && rpcCall.getCredential().getFlavor() != AuthFlavor.RPCSEC_GSS) { LOG.info("Wrong RPC AUTH flavor, " - + rpcCall.getCredential().getFlavor() + " is not AUTH_SYS."); + + rpcCall.getCredential().getFlavor() + + " is not AUTH_SYS or RPCSEC_GSS."); XDR reply = new XDR(); reply = RpcDeniedReply.voidReply(reply, xid, RpcReply.ReplyState.MSG_ACCEPTED, RpcDeniedReply.RejectState.AUTH_ERROR); return reply; } - authSys = RpcAuthSys.from(rpcCall.getCredential().getBody()); } + SecurityHandler securityHandler = getSecurityHandler(credentials, + rpcCall.getVerifier()); + NFS3Response response = null; if (nfsproc3 == NFSPROC3.NULL) { response = nullProcedure(); } else if (nfsproc3 == NFSPROC3.GETATTR) { - response = getattr(xdr, authSys, client); + response = getattr(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.SETATTR) { - response = setattr(xdr, authSys, client); + response = setattr(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.LOOKUP) { - response = lookup(xdr, authSys, client); + response = lookup(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.ACCESS) { - response = access(xdr, authSys, client); + response = access(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.READLINK) { - response = readlink(xdr, authSys, client); + response = readlink(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.READ) { - response = read(xdr, authSys, client); + response = read(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.WRITE) { - response = write(xdr, channel, xid, authSys, client); + response = write(xdr, channel, xid, securityHandler, client); } else if (nfsproc3 == NFSPROC3.CREATE) { - response = create(xdr, authSys, client); + response = create(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.MKDIR) { - response = mkdir(xdr, authSys, client); + response = mkdir(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.SYMLINK) { - response = symlink(xdr, authSys, client); + response = symlink(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.MKNOD) { - response = 
mknod(xdr, authSys, client); + response = mknod(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.REMOVE) { - response = remove(xdr, authSys, client); + response = remove(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.RMDIR) { - response = rmdir(xdr, authSys, client); + response = rmdir(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.RENAME) { - response = rename(xdr, authSys, client); + response = rename(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.LINK) { - response = link(xdr, authSys, client); + response = link(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.READDIR) { - response = readdir(xdr, authSys, client); + response = readdir(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.READDIRPLUS) { - response = readdirplus(xdr, authSys, client); + response = readdirplus(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.FSSTAT) { - response = fsstat(xdr, authSys, client); + response = fsstat(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.FSINFO) { - response = fsinfo(xdr, authSys, client); + response = fsinfo(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.PATHCONF) { - response = pathconf(xdr, authSys, client); + response = pathconf(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.COMMIT) { - response = commit(xdr, authSys, client); + response = commit(xdr, securityHandler, client); } else { // Invalid procedure RpcAcceptedReply.voidReply(out, xid, diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java index 0e51340e49d..7965a140d7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java @@ -42,7 +42,7 @@ public class TestMountd { // Start minicluster Configuration config = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1) - .manageNameDfsDirs(false).build(); + .build(); cluster.waitActive(); // Start nfs diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 735dd2634e4..ee6ff0f922a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -330,6 +330,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5118. Provide testing support for DFSClient to drop RPC responses. (jing9) + HDFS-5085. Refactor o.a.h.nfs to support different types of + authentications. (jing9) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 220b21732b4a08ebf3a51f4d5ba64d0acc7c4b39 Mon Sep 17 00:00:00 2001 From: Jonathan Turner Eagles Date: Tue, 10 Sep 2013 19:53:49 +0000 Subject: [PATCH 03/16] YARN-1119. Add ClusterMetrics checks to tho TestRMNodeTransitions tests (Mit Desai via jeagles) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521611 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../TestRMNodeTransitions.java | 42 +++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 76365a8b885..646b6b25c55 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -1294,6 +1294,9 @@ Release 0.23.10 - UNRELEASED YARN-985. 
Nodemanager should log where a resource was localized (Ravi Prakash via jeagles) + YARN-1119. Add ClusterMetrics checks to tho TestRMNodeTransitions tests + (Mit Desai via jeagles) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 694d2826bfe..608fa6449b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -260,7 +260,21 @@ public class TestRMNodeTransitions { @Test public void testRunningExpire() { RMNodeImpl node = getRunningNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE)); + Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost + 1, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.LOST, node.getState()); } @@ -297,8 +311,22 @@ public class TestRMNodeTransitions { @Test public void testRunningDecommission() { RMNodeImpl node = getRunningNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.DECOMMISSION)); + Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned + 1, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.DECOMMISSIONED, node.getState()); } @@ -327,8 +355,22 @@ public class TestRMNodeTransitions { @Test public void testRunningRebooting() { RMNodeImpl node = getRunningNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.REBOOTING)); + Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", 
initialLost, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted + 1, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.REBOOTED, node.getState()); } From b5c6ff164a9a13f04cbe48d1fc14ceb71154525a Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Tue, 10 Sep 2013 22:51:27 +0000 Subject: [PATCH 04/16] YARN-1025. ResourceManager and NodeManager do not load native libraries on Windows. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521670 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 646b6b25c55..270dd247e6a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -182,6 +182,9 @@ Release 2.1.1-beta - UNRELEASED data structures thread safe to avoid RM crashing with ArrayIndexOutOfBoundsException. (Zhijie Shen via vinodkv) + YARN-1025. ResourceManager and NodeManager do not load native libraries on + Windows. (cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd index 031cce7bd6b..955df46245b 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd @@ -135,6 +135,10 @@ if "%1" == "--config" ( call :%yarn-command% %yarn-command-arguments% + if defined JAVA_LIBRARY_PATH ( + set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% + ) + set java_arguments=%JAVA_HEAP_MAX% %YARN_OPTS% -classpath %CLASSPATH% %CLASS% %yarn-command-arguments% call %JAVA% %java_arguments% From 1c1ebc1553650ac8e4486faf21f0d95150f607ad Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Wed, 11 Sep 2013 00:38:49 +0000 Subject: [PATCH 05/16] MAPREDUCE-5497. Changed MRAppMaster to sleep only after doing everything else but just before ClientService to avoid race conditions during RM restart. Contributed by Jian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521699 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 + .../hadoop/mapreduce/v2/app/MRAppMaster.java | 26 ++-- .../v2/app/client/ClientService.java | 40 +++--- .../v2/app/client/MRClientService.java | 5 +- .../apache/hadoop/mapreduce/v2/app/MRApp.java | 3 +- .../app/TestMRAppComponentDependencies.java | 121 ++++++++++++++++++ .../mapreduce/v2/app/TestStagingCleanup.java | 33 +---- 7 files changed, 170 insertions(+), 62 deletions(-) create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 24e7da1a7ba..3dc14de172b 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -186,6 +186,10 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5478. TeraInputFormat unnecessarily defines its own FileSplit subclass (Sandy Ryza) + MAPREDUCE-5497. Changed MRAppMaster to sleep only after doing everything else + but just before ClientService to avoid race conditions during RM restart. 
+ (Jian He via vinodkv) + OPTIMIZATIONS MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 58f4deef3a1..24db757e4f6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -362,7 +362,10 @@ public class MRAppMaster extends CompositeService { //service to handle requests from JobClient clientService = createClientService(context); - addIfService(clientService); + // Init ClientService separately so that we stop it separately, since this + // service needs to wait some time before it stops so clients can know the + // final states + clientService.init(conf); containerAllocator = createContainerAllocator(clientService, context); @@ -425,7 +428,6 @@ public class MRAppMaster extends CompositeService { // queued inside the JobHistoryEventHandler addIfService(historyService); } - super.serviceInit(conf); } // end of init() @@ -534,14 +536,6 @@ public class MRAppMaster extends CompositeService { } } - // TODO:currently just wait for some time so clients can know the - // final states. Will be removed once RM come on. - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - try { //if isLastAMRetry comes as true, should never set it to false if ( !isLastAMRetry){ @@ -556,6 +550,14 @@ public class MRAppMaster extends CompositeService { LOG.info("Calling stop for all the services"); MRAppMaster.this.stop(); + // TODO: Stop ClientService last, since only ClientService should wait for + // some time so clients can know the final states. Will be removed once RM come on. + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + clientService.stop(); } catch (Throwable t) { LOG.warn("Graceful stop failed ", t); } @@ -1019,8 +1021,10 @@ public class MRAppMaster extends CompositeService { LOG.info("MRAppMaster launching normal, non-uberized, multi-container " + "job " + job.getID() + "."); } + // Start ClientService here, since it's not initialized if + // errorHappenedShutDown is true + clientService.start(); } - //start all the components super.serviceStart(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java index a4c0a0d14bd..4727ffd8710 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java @@ -1,28 +1,30 @@ /** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. 
The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.mapreduce.v2.app.client; import java.net.InetSocketAddress; -public interface ClientService { +import org.apache.hadoop.service.Service; - InetSocketAddress getBindAddress(); +public interface ClientService extends Service { - int getHttpPort(); + public abstract InetSocketAddress getBindAddress(); + + public abstract int getHttpPort(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index d36bf62fdf0..181fd3740a9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -94,8 +94,7 @@ import org.apache.hadoop.yarn.webapp.WebApps; * jobclient (user facing). 
* */ -public class MRClientService extends AbstractService - implements ClientService { +public class MRClientService extends AbstractService implements ClientService { static final Log LOG = LogFactory.getLog(MRClientService.class); @@ -106,7 +105,7 @@ public class MRClientService extends AbstractService private AppContext appContext; public MRClientService(AppContext appContext) { - super("MRClientService"); + super(MRClientService.class.getName()); this.appContext = appContext; this.protocolHandler = new MRClientProtocolHandler(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index 6d1804f0627..76fd00ad848 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -55,6 +55,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; +import org.apache.hadoop.mapreduce.v2.app.client.MRClientService; import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEvent; import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler; import org.apache.hadoop.mapreduce.v2.app.job.Job; @@ -603,7 +604,7 @@ public class MRApp extends MRAppMaster { @Override protected ClientService createClientService(AppContext context) { - return new ClientService(){ + return new MRClientService(context) { @Override public InetSocketAddress getBindAddress() { return NetUtils.createSocketAddr("localhost:9876"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java new file mode 100644 index 00000000000..52ee2c7017d --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce.v2.app; + +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; +import org.apache.hadoop.mapreduce.v2.api.records.JobState; +import org.apache.hadoop.mapreduce.v2.app.client.ClientService; +import org.apache.hadoop.mapreduce.v2.app.client.MRClientService; +import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; +import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent; +import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.junit.Test; + +public class TestMRAppComponentDependencies { + + @Test(timeout = 20000) + public void testComponentStopOrder() throws Exception { + @SuppressWarnings("resource") + TestMRApp app = new TestMRApp(1, 1, true, this.getClass().getName(), true); + JobImpl job = (JobImpl) app.submit(new Configuration()); + app.waitForState(job, JobState.SUCCEEDED); + app.verifyCompleted(); + + int waitTime = 20 * 1000; + while (waitTime > 0 && app.numStops < 2) { + Thread.sleep(100); + waitTime -= 100; + } + + // assert JobHistoryEventHandlerStopped and then clientServiceStopped + Assert.assertEquals(1, app.JobHistoryEventHandlerStopped); + Assert.assertEquals(2, app.clientServiceStopped); + } + + private final class TestMRApp extends MRApp { + int JobHistoryEventHandlerStopped; + int clientServiceStopped; + int numStops; + + public TestMRApp(int maps, int reduces, boolean autoComplete, + String testName, boolean cleanOnStart) { + super(maps, reduces, autoComplete, testName, cleanOnStart); + JobHistoryEventHandlerStopped = 0; + clientServiceStopped = 0; + numStops = 0; + } + + @Override + protected Job createJob(Configuration conf, JobStateInternal forcedState, + String diagnostic) { + UserGroupInformation currentUser = null; + try { + currentUser = UserGroupInformation.getCurrentUser(); + } catch (IOException e) { + throw new YarnRuntimeException(e); + } + Job newJob = + new TestJob(getJobId(), getAttemptID(), conf, getDispatcher() + .getEventHandler(), getTaskAttemptListener(), getContext() + .getClock(), getCommitter(), isNewApiCommitter(), + currentUser.getUserName(), getContext(), forcedState, diagnostic); + ((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob); + + getDispatcher().register(JobFinishEvent.Type.class, + createJobFinishEventHandler()); + + return newJob; + } + + @Override + protected ClientService createClientService(AppContext context) { + return new MRClientService(context) { + @Override + public void serviceStop() throws Exception { + numStops++; + clientServiceStopped = numStops; + super.serviceStop(); + } + }; + } + + @Override + protected EventHandler createJobHistoryHandler( + AppContext context) { + return new JobHistoryEventHandler(context, getStartCount()) { + @Override + public void serviceStop() throws Exception { + numStops++; + JobHistoryEventHandlerStopped = numStops; + super.serviceStop(); + } + }; + } + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index a0c0cb6c35f..496c1e35068 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; +import org.apache.hadoop.mapreduce.v2.app.client.MRClientService; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent; @@ -284,14 +285,12 @@ import org.junit.Test; private final class MRAppTestCleanup extends MRApp { int stagingDirCleanedup; int ContainerAllocatorStopped; - int JobHistoryEventHandlerStopped; int numStops; public MRAppTestCleanup(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart) { super(maps, reduces, autoComplete, testName, cleanOnStart); stagingDirCleanedup = 0; ContainerAllocatorStopped = 0; - JobHistoryEventHandlerStopped = 0; numStops = 0; } @@ -318,26 +317,6 @@ import org.junit.Test; return newJob; } - @Override - protected EventHandler createJobHistoryHandler( - AppContext context) { - return new TestJobHistoryEventHandler(context, getStartCount()); - } - - private class TestJobHistoryEventHandler extends JobHistoryEventHandler { - - public TestJobHistoryEventHandler(AppContext context, int startCount) { - super(context, startCount); - } - - @Override - public void serviceStop() throws Exception { - numStops++; - JobHistoryEventHandlerStopped = numStops; - super.serviceStop(); - } - } - @Override protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context) { @@ -405,15 +384,13 @@ import org.junit.Test; app.verifyCompleted(); int waitTime = 20 * 1000; - while (waitTime > 0 && app.numStops < 3 ) { + while (waitTime > 0 && app.numStops < 2) { Thread.sleep(100); waitTime -= 100; } - // assert JobHistoryEventHandlerStopped first, then - // ContainerAllocatorStopped, and then stagingDirCleanedup - Assert.assertEquals(1, app.JobHistoryEventHandlerStopped); - Assert.assertEquals(2, app.ContainerAllocatorStopped); - Assert.assertEquals(3, app.stagingDirCleanedup); + // assert ContainerAllocatorStopped and then tagingDirCleanedup + Assert.assertEquals(1, app.ContainerAllocatorStopped); + Assert.assertEquals(2, app.stagingDirCleanedup); } } From e4374d803663c626de610cd5f062f25a6d7d5d4e Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 11 Sep 2013 19:57:37 +0000 Subject: [PATCH 06/16] HDFS-4680. Audit logging of delegation tokens for MR tracing. 
(Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522012 13f79535-47bb-0310-9956-ffa450edef68 --- .../security/token/TokenIdentifier.java | 19 ++++++ .../AbstractDelegationTokenSecretManager.java | 38 +++++++++-- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 + .../DelegationTokenSecretManager.java | 18 ++++- .../hdfs/server/namenode/FSNamesystem.java | 46 +++++++++++-- .../hdfs/server/namenode/HdfsAuditLogger.java | 66 +++++++++++++++++++ 7 files changed, 178 insertions(+), 13 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java index a33bcde7072..ebf9d58b37c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java @@ -21,6 +21,7 @@ package org.apache.hadoop.security.token; import java.io.IOException; import java.util.Arrays; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.DataOutputBuffer; @@ -35,6 +36,9 @@ import org.apache.hadoop.security.UserGroupInformation; @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public abstract class TokenIdentifier implements Writable { + + private String trackingId = null; + /** * Get the token kind * @return the kind of the token @@ -62,4 +66,19 @@ public abstract class TokenIdentifier implements Writable { } return Arrays.copyOf(buf.getData(), buf.getLength()); } + + /** + * Returns a tracking identifier that can be used to associate usages of a + * token across multiple client sessions. + * + * Currently, this function just returns an MD5 of {{@link #getBytes()}. + * + * @return tracking identifier + */ + public String getTrackingId() { + if (trackingId == null) { + trackingId = DigestUtils.md5Hex(getBytes()); + } + return trackingId; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 42085acb178..a63e0628288 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -86,6 +86,11 @@ extends AbstractDelegationTokenIdentifier> private long tokenMaxLifetime; private long tokenRemoverScanInterval; private long tokenRenewInterval; + /** + * Whether to store a token's tracking ID in its TokenInformation. + * Can be overridden by a subclass. 
+ */ + protected boolean storeTokenTrackingId; private Thread tokenRemoverThread; protected volatile boolean running; @@ -102,6 +107,7 @@ extends AbstractDelegationTokenIdentifier> this.tokenMaxLifetime = delegationTokenMaxLifetime; this.tokenRenewInterval = delegationTokenRenewInterval; this.tokenRemoverScanInterval = delegationTokenRemoverScanInterval; + this.storeTokenTrackingId = false; } /** should be called before this object is used */ @@ -201,7 +207,7 @@ extends AbstractDelegationTokenIdentifier> } if (currentTokens.get(identifier) == null) { currentTokens.put(identifier, new DelegationTokenInformation(renewDate, - password)); + password, getTrackingIdIfEnabled(identifier))); } else { throw new IOException( "Same delegation token being added twice."); @@ -280,7 +286,7 @@ extends AbstractDelegationTokenIdentifier> byte[] password = createPassword(identifier.getBytes(), currentKey.getKey()); storeNewToken(identifier, now + tokenRenewInterval); currentTokens.put(identifier, new DelegationTokenInformation(now - + tokenRenewInterval, password)); + + tokenRenewInterval, password, getTrackingIdIfEnabled(identifier))); return password; } @@ -299,6 +305,21 @@ extends AbstractDelegationTokenIdentifier> return info.getPassword(); } + protected String getTrackingIdIfEnabled(TokenIdent ident) { + if (storeTokenTrackingId) { + return ident.getTrackingId(); + } + return null; + } + + public synchronized String getTokenTrackingId(TokenIdent identifier) { + DelegationTokenInformation info = currentTokens.get(identifier); + if (info == null) { + return null; + } + return info.getTrackingId(); + } + /** * Verifies that the given identifier and password are valid and match. * @param identifier Token identifier. @@ -359,8 +380,9 @@ extends AbstractDelegationTokenIdentifier> + " is trying to renew a token with " + "wrong password"); } long renewTime = Math.min(id.getMaxDate(), now + tokenRenewInterval); + String trackingId = getTrackingIdIfEnabled(id); DelegationTokenInformation info = new DelegationTokenInformation(renewTime, - password); + password, trackingId); if (currentTokens.get(id) == null) { throw new InvalidToken("Renewal request for unknown token"); @@ -420,9 +442,13 @@ extends AbstractDelegationTokenIdentifier> public static class DelegationTokenInformation { long renewDate; byte[] password; - public DelegationTokenInformation(long renewDate, byte[] password) { + String trackingId; + + public DelegationTokenInformation(long renewDate, byte[] password, + String trackingId) { this.renewDate = renewDate; this.password = password; + this.trackingId = trackingId; } /** returns renew date */ public long getRenewDate() { @@ -432,6 +458,10 @@ extends AbstractDelegationTokenIdentifier> byte[] getPassword() { return password; } + /** returns tracking id */ + public String getTrackingId() { + return trackingId; + } } /** Remove expired delegation tokens from cache */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ee6ff0f922a..ee45928cd21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -272,6 +272,8 @@ Release 2.3.0 - UNRELEASED HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs (Todd Lipcon via Colin Patrick McCabe) + HDFS-4680. Audit logging of delegation tokens for MR tracing. 
(Andrew Wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b4d67ca19d2..a29121248c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -267,6 +267,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces"; public static final String DFS_NAMENODE_AUDIT_LOGGERS_KEY = "dfs.namenode.audit.loggers"; public static final String DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME = "default"; + public static final String DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY = "dfs.namenode.audit.log.token.tracking.id"; + public static final boolean DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT = false; // Much code in hdfs is not yet updated to use these keys. public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 25fb25731f7..f233d1f5f72 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -58,6 +58,15 @@ public class DelegationTokenSecretManager .getLog(DelegationTokenSecretManager.class); private final FSNamesystem namesystem; + + public DelegationTokenSecretManager(long delegationKeyUpdateInterval, + long delegationTokenMaxLifetime, long delegationTokenRenewInterval, + long delegationTokenRemoverScanInterval, FSNamesystem namesystem) { + this(delegationKeyUpdateInterval, delegationTokenMaxLifetime, + delegationTokenRenewInterval, delegationTokenRemoverScanInterval, false, + namesystem); + } + /** * Create a secret manager * @param delegationKeyUpdateInterval the number of seconds for rolling new @@ -67,13 +76,16 @@ public class DelegationTokenSecretManager * @param delegationTokenRenewInterval how often the tokens must be renewed * @param delegationTokenRemoverScanInterval how often the tokens are scanned * for expired tokens + * @param storeTokenTrackingId whether to store the token's tracking id */ public DelegationTokenSecretManager(long delegationKeyUpdateInterval, long delegationTokenMaxLifetime, long delegationTokenRenewInterval, - long delegationTokenRemoverScanInterval, FSNamesystem namesystem) { + long delegationTokenRemoverScanInterval, boolean storeTokenTrackingId, + FSNamesystem namesystem) { super(delegationKeyUpdateInterval, delegationTokenMaxLifetime, delegationTokenRenewInterval, delegationTokenRemoverScanInterval); this.namesystem = namesystem; + this.storeTokenTrackingId = storeTokenTrackingId; } @Override //SecretManager @@ -184,7 +196,7 @@ public class DelegationTokenSecretManager } if (currentTokens.get(identifier) == null) { currentTokens.put(identifier, new DelegationTokenInformation(expiryTime, - password)); + password, getTrackingIdIfEnabled(identifier))); } else { throw new IOException( "Same 
delegation token being added twice; invalid entry in fsimage or editlogs"); @@ -223,7 +235,7 @@ public class DelegationTokenSecretManager byte[] password = createPassword(identifier.getBytes(), allKeys .get(keyId).getKey()); currentTokens.put(identifier, new DelegationTokenInformation(expiryTime, - password)); + password, getTrackingIdIfEnabled(identifier))); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 884a14b8959..dc733122c68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -36,6 +36,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KE import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY; @@ -220,6 +222,8 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; @@ -311,8 +315,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats, stat.getGroup(), symlink, path); } for (AuditLogger logger : auditLoggers) { - logger.logAuditEvent(succeeded, ugi.toString(), addr, - cmd, src, dst, status); + if (logger instanceof HdfsAuditLogger) { + HdfsAuditLogger hdfsLogger = (HdfsAuditLogger) logger; + hdfsLogger.logAuditEvent(succeeded, ugi.toString(), addr, cmd, src, dst, + status, ugi, dtSecretManager); + } else { + logger.logAuditEvent(succeeded, ugi.toString(), addr, + cmd, src, dst, status); + } } } @@ -5906,7 +5916,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats, DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT), conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT), - DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL, this); + DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL, + conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY, + DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT), + this); } /** @@ -6817,17 +6830,22 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * defined in the config file. It can also be explicitly listed in the * config file. 
*/ - private static class DefaultAuditLogger implements AuditLogger { + private static class DefaultAuditLogger extends HdfsAuditLogger { + + private boolean logTokenTrackingId; @Override public void initialize(Configuration conf) { - // Nothing to do. + logTokenTrackingId = conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY, + DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT); } @Override public void logAuditEvent(boolean succeeded, String userName, InetAddress addr, String cmd, String src, String dst, - FileStatus status) { + FileStatus status, UserGroupInformation ugi, + DelegationTokenSecretManager dtSecretManager) { if (auditLog.isInfoEnabled()) { final StringBuilder sb = auditBuffer.get(); sb.setLength(0); @@ -6845,6 +6863,22 @@ public class FSNamesystem implements Namesystem, FSClusterStats, sb.append(status.getGroup()).append(":"); sb.append(status.getPermission()); } + if (logTokenTrackingId) { + sb.append("\t").append("trackingId="); + String trackingId = null; + if (ugi != null && dtSecretManager != null + && ugi.getAuthenticationMethod() == AuthenticationMethod.TOKEN) { + for (TokenIdentifier tid: ugi.getTokenIdentifiers()) { + if (tid instanceof DelegationTokenIdentifier) { + DelegationTokenIdentifier dtid = + (DelegationTokenIdentifier)tid; + trackingId = dtSecretManager.getTokenTrackingId(dtid); + break; + } + } + } + sb.append(trackingId); + } auditLog.info(sb); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java new file mode 100644 index 00000000000..1c2bc57e0ad --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.net.InetAddress; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Extension of {@link AuditLogger}. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class HdfsAuditLogger implements AuditLogger { + + @Override + public void logAuditEvent(boolean succeeded, String userName, + InetAddress addr, String cmd, String src, String dst, + FileStatus status) { + logAuditEvent(succeeded, userName, addr, cmd, src, dst, status, null, + null); + } + + /** + * Same as + * {@link #logAuditEvent(boolean, String, InetAddress, String, String, String, FileStatus)} + * with additional parameters related to logging delegation token tracking + * IDs. + * + * @param succeeded Whether authorization succeeded. + * @param userName Name of the user executing the request. + * @param addr Remote address of the request. + * @param cmd The requested command. + * @param src Path of affected source file. + * @param dst Path of affected destination file (if any). + * @param stat File information for operations that change the file's metadata + * (permissions, owner, times, etc). + * @param ugi UserGroupInformation of the current user, or null if not logging + * token tracking information + * @param dtSecretManager The token secret manager, or null if not logging + * token tracking information + */ + public abstract void logAuditEvent(boolean succeeded, String userName, + InetAddress addr, String cmd, String src, String dst, + FileStatus stat, UserGroupInformation ugi, + DelegationTokenSecretManager dtSecretManager); +} From b6d9ef18c4969eb1afe6f174780dd727e459c183 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 11 Sep 2013 21:17:30 +0000 Subject: [PATCH 07/16] Move HDFS-4680 in CHANGES.txt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522049 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ee45928cd21..0627b0ea56e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -272,8 +272,6 @@ Release 2.3.0 - UNRELEASED HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs (Todd Lipcon via Colin Patrick McCabe) - HDFS-4680. Audit logging of delegation tokens for MR tracing. (Andrew Wang) - OPTIMIZATIONS BUG FIXES @@ -369,6 +367,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5150. Allow per NN SPN for internal SPNEGO. (kihwal) + HDFS-4680. Audit logging of delegation tokens for MR tracing. (Andrew Wang) + OPTIMIZATIONS BUG FIXES From c22091aecb802128b37c51501e40023532e3c62f Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Wed, 11 Sep 2013 22:00:08 +0000 Subject: [PATCH 08/16] YARN-1176. RM web services ClusterMetricsInfo total nodes doesn't include unhealthy nodes (Jonathan Eagles via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522062 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 6 ++++++ .../webapp/dao/ClusterMetricsInfo.java | 2 +- .../resourcemanager/webapp/TestRMWebServices.java | 14 +++++++++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 270dd247e6a..779f9fcfed0 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -185,6 +185,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1025. ResourceManager and NodeManager do not load native libraries on Windows. (cnauroth) + YARN-1176. 
RM web services ClusterMetricsInfo total nodes doesn't include + unhealthy nodes (Jonathan Eagles via tgraves) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES @@ -1309,6 +1312,9 @@ Release 0.23.10 - UNRELEASED YARN-1101. Active nodes can be decremented below 0 (Robert Parker via tgraves) + YARN-1176. RM web services ClusterMetricsInfo total nodes doesn't include + unhealthy nodes (Jonathan Eagles via tgraves) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index 8266661954b..92c9678371b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -84,7 +84,7 @@ public class ClusterMetricsInfo { this.decommissionedNodes = clusterMetrics.getNumDecommisionedNMs(); this.rebootedNodes = clusterMetrics.getNumRebootedNMs(); this.totalNodes = activeNodes + lostNodes + decommissionedNodes - + rebootedNodes; + + rebootedNodes + unhealthyNodes; } public int getAppsSubmitted() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java index 4f6cbbec320..89bec21cc24 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java @@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.webapp.WebServicesTestUtils; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import org.w3c.dom.Document; import org.w3c.dom.Element; @@ -109,6 +110,16 @@ public class TestRMWebServices extends JerseyTest { .contextPath("jersey-guice-filter").servletPath("/").build()); } + @BeforeClass + public static void initClusterMetrics() { + ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); + clusterMetrics.incrDecommisionedNMs(); + clusterMetrics.incrNumActiveNodes(); + clusterMetrics.incrNumLostNMs(); + clusterMetrics.incrNumRebootedNMs(); + clusterMetrics.incrNumUnhealthyNMs(); + } + @Test public void testInfoXML() throws JSONException, Exception { WebResource r = resource(); @@ -426,7 +437,8 @@ public class TestRMWebServices extends JerseyTest { "totalNodes doesn't match", clusterMetrics.getNumActiveNMs() + clusterMetrics.getNumLostNMs() + clusterMetrics.getNumDecommisionedNMs() - + clusterMetrics.getNumRebootedNMs(), totalNodes); + + clusterMetrics.getNumRebootedNMs() + + 
clusterMetrics.getUnhealthyNMs(), totalNodes); assertEquals("lostNodes doesn't match", clusterMetrics.getNumLostNMs(), lostNodes); assertEquals("unhealthyNodes doesn't match", From a202855af59d2eeb1c4a119bee6d4eb56565a9e9 Mon Sep 17 00:00:00 2001 From: Devarajulu K Date: Thu, 12 Sep 2013 14:32:20 +0000 Subject: [PATCH 09/16] MAPREDUCE-5164. mapred job and queue commands omit HADOOP_CLIENT_OPTS. Contributed by Nemon Lou. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522595 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ hadoop-mapreduce-project/bin/mapred | 2 ++ hadoop-mapreduce-project/bin/mapred.cmd | 2 ++ 3 files changed, 7 insertions(+) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 3dc14de172b..093273f0cb9 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -262,6 +262,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5020. Compile failure with JDK8 (Trevor Robinson via tgraves) + MAPREDUCE-5164. mapred job and queue commands omit HADOOP_CLIENT_OPTS + (Nemon Lou via devaraj) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred index 5509418c817..b95c7cd4f94 100755 --- a/hadoop-mapreduce-project/bin/mapred +++ b/hadoop-mapreduce-project/bin/mapred @@ -61,8 +61,10 @@ esac if [ "$COMMAND" = "job" ] ; then CLASS=org.apache.hadoop.mapred.JobClient + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "queue" ] ; then CLASS=org.apache.hadoop.mapred.JobQueueClient + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "pipes" ] ; then CLASS=org.apache.hadoop.mapred.pipes.Submitter HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" diff --git a/hadoop-mapreduce-project/bin/mapred.cmd b/hadoop-mapreduce-project/bin/mapred.cmd index d26b930e378..b2d53fa145e 100644 --- a/hadoop-mapreduce-project/bin/mapred.cmd +++ b/hadoop-mapreduce-project/bin/mapred.cmd @@ -103,10 +103,12 @@ goto :eof :job set CLASS=org.apache.hadoop.mapred.JobClient + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :queue set CLASS=org.apache.hadoop.mapred.JobQueueClient + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :sampler From f152a7e788013249b4782edf08b4b47dbef71036 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 12 Sep 2013 15:58:34 +0000 Subject: [PATCH 10/16] YARN-1078. TestNodeManagerResync, TestNodeManagerShutdown, and TestNodeStatusUpdater fail on Windows. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522644 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../nodemanager/TestNodeManagerShutdown.java | 4 ++- .../nodemanager/TestNodeStatusUpdater.java | 32 ++++++++++++------- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 779f9fcfed0..266faa54754 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -188,6 +188,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1176. RM web services ClusterMetricsInfo total nodes doesn't include unhealthy nodes (Jonathan Eagles via tgraves) + YARN-1078. TestNodeManagerResync, TestNodeManagerShutdown, and + TestNodeStatusUpdater fail on Windows. 
(Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java index 6fcb1e0710a..4c9559d660a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java @@ -23,6 +23,7 @@ import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.security.PrivilegedAction; import java.util.ArrayList; @@ -163,7 +164,8 @@ public class TestNodeManagerShutdown { ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); - NodeId nodeId = BuilderUtils.newNodeId("localhost", 12345); + NodeId nodeId = BuilderUtils.newNodeId(InetAddress.getByName("localhost") + .getCanonicalHostName(), 12345); URL localResourceUri = ConverterUtils.getYarnUrlFromPath(localFS diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 3fc5a2dddfb..4f7cd30ed66 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -23,7 +23,9 @@ import static org.mockito.Mockito.when; import java.io.File; import java.io.IOException; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; @@ -219,11 +221,11 @@ public class TestNodeStatusUpdater { Resource resource = BuilderUtils.newResource(2, 1); long currentTime = System.currentTimeMillis(); String user = "testUser"; - ContainerTokenIdentifier containerToken = - BuilderUtils.newContainerTokenIdentifier(BuilderUtils - .newContainerToken(firstContainerID, "localhost", 1234, user, - resource, currentTime + 10000, 123, "password".getBytes(), - currentTime)); + ContainerTokenIdentifier containerToken = BuilderUtils + .newContainerTokenIdentifier(BuilderUtils.newContainerToken( + firstContainerID, InetAddress.getByName("localhost") + .getCanonicalHostName(), 1234, user, resource, + currentTime + 10000, 123, "password".getBytes(), currentTime)); Container container = new ContainerImpl(conf, mockDispatcher, launchContext, null, mockMetrics, containerToken); @@ -250,11 +252,11 @@ public class TestNodeStatusUpdater { long currentTime = System.currentTimeMillis(); String user = "testUser"; Resource resource = BuilderUtils.newResource(3, 1); - ContainerTokenIdentifier containerToken = 
- BuilderUtils.newContainerTokenIdentifier(BuilderUtils - .newContainerToken(secondContainerID, "localhost", 1234, user, - resource, currentTime + 10000, 123, - "password".getBytes(), currentTime)); + ContainerTokenIdentifier containerToken = BuilderUtils + .newContainerTokenIdentifier(BuilderUtils.newContainerToken( + secondContainerID, InetAddress.getByName("localhost") + .getCanonicalHostName(), 1234, user, resource, + currentTime + 10000, 123, "password".getBytes(), currentTime)); Container container = new ContainerImpl(conf, mockDispatcher, launchContext, null, mockMetrics, containerToken); @@ -1290,9 +1292,15 @@ public class TestNodeStatusUpdater { private YarnConfiguration createNMConfig() { YarnConfiguration conf = new YarnConfiguration(); + String localhostAddress = null; + try { + localhostAddress = InetAddress.getByName("localhost").getCanonicalHostName(); + } catch (UnknownHostException e) { + Assert.fail("Unable to get localhost address: " + e.getMessage()); + } conf.setInt(YarnConfiguration.NM_PMEM_MB, 5 * 1024); // 5GB - conf.set(YarnConfiguration.NM_ADDRESS, "localhost:12345"); - conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "localhost:12346"); + conf.set(YarnConfiguration.NM_ADDRESS, localhostAddress + ":12345"); + conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, localhostAddress + ":12346"); conf.set(YarnConfiguration.NM_LOG_DIRS, logsDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogsDir.getAbsolutePath()); From a98637e6dd8a567c2718c954bce9a65abfa1c043 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Thu, 12 Sep 2013 20:05:08 +0000 Subject: [PATCH 11/16] HADOOP-9958. Add old constructor back to DelegationTokenInformation to unbreak downstream builds. (Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522707 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../delegation/AbstractDelegationTokenSecretManager.java | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index bc130ff364e..95da899553a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -454,6 +454,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9932. Improper synchronization in RetryCache. (kihwal) + HADOOP-9958. Add old constructor back to DelegationTokenInformation to + unbreak downstream builds. 
(Andrew Wang) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index a63e0628288..708700b9623 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -444,6 +444,10 @@ extends AbstractDelegationTokenIdentifier> byte[] password; String trackingId; + public DelegationTokenInformation(long renewDate, byte[] password) { + this(renewDate, password, null); + } + public DelegationTokenInformation(long renewDate, byte[] password, String trackingId) { this.renewDate = renewDate; From ede10b8a1f9a4d099c16469f827345cb359cef3d Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Fri, 13 Sep 2013 04:21:46 +0000 Subject: [PATCH 12/16] HDFS-5067 Support symlink operations in NFS gateway. Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522774 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/mount/MountResponse.java | 3 +- .../apache/hadoop/nfs/nfs3/IdUserGroup.java | 4 + .../hadoop/nfs/nfs3/Nfs3FileAttributes.java | 11 +- .../nfs/nfs3/request/SYMLINK3Request.java | 4 +- .../nfs/nfs3/response/READLINK3Response.java | 2 +- .../nfs/nfs3/response/SYMLINK3Response.java | 2 +- .../hadoop/hdfs/nfs/nfs3/Nfs3Utils.java | 7 +- .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 131 +++++++++++++++++- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + 9 files changed, 146 insertions(+), 20 deletions(-) diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java index a9131e3b509..dd837f54121 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java @@ -40,8 +40,7 @@ public class MountResponse { RpcAcceptedReply.voidReply(xdr, xid); xdr.writeInt(status); if (status == MNT_OK) { - xdr.writeInt(handle.length); - xdr.writeFixedOpaque(handle); + xdr.writeVariableOpaque(handle); // Only MountV3 returns a list of supported authFlavors xdr.writeInt(1); xdr.writeInt(AuthFlavor.AUTH_SYS.getValue()); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java index c0be5dd2890..e034c664059 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java @@ -149,6 +149,8 @@ public class IdUserGroup { checkAndUpdateMaps(); String uname = uidNameMap.get(uid); if (uname == null) { + LOG.warn("Can't find user name for uid " + uid + + ". Use default user name " + unknown); uname = unknown; } return uname; @@ -158,6 +160,8 @@ public class IdUserGroup { checkAndUpdateMaps(); String gname = gidNameMap.get(gid); if (gname == null) { + LOG.warn("Can't find group name for gid " + gid + + ". 
Use default group name " + unknown); gname = unknown; } return gname; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java index 1ba727b4474..f8ce1fad889 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3FileAttributes.java @@ -72,19 +72,18 @@ public class Nfs3FileAttributes { } public Nfs3FileAttributes() { - this(false, 0, (short)0, 0, 0, 0, 0, 0, 0, 0); + this(NfsFileType.NFSREG, 0, (short)0, 0, 0, 0, 0, 0, 0, 0); } - public Nfs3FileAttributes(boolean isDir, int nlink, short mode, int uid, + public Nfs3FileAttributes(NfsFileType nfsType, int nlink, short mode, int uid, int gid, long size, long fsid, long fileid, long mtime, long atime) { - this.type = isDir ? NfsFileType.NFSDIR.toValue() : NfsFileType.NFSREG - .toValue(); + this.type = nfsType.toValue(); this.mode = mode; - this.nlink = isDir ? (nlink + 2) : 1; + this.nlink = (type == NfsFileType.NFSDIR.toValue()) ? (nlink + 2) : 1; this.uid = uid; this.gid = gid; this.size = size; - if(isDir) { + if(type == NfsFileType.NFSDIR.toValue()) { this.size = getDirSize(nlink); } this.used = this.size; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java index 6e9f641cc2f..6e74d1aa61b 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/request/SYMLINK3Request.java @@ -25,9 +25,9 @@ import org.apache.hadoop.oncrpc.XDR; * SYMLINK3 Request */ public class SYMLINK3Request extends RequestWithHandle { - private final String name; + private final String name; // The name of the link private final SetAttr3 symAttr; - private final String symData; + private final String symData; // It contains the target public SYMLINK3Request(XDR xdr) throws IOException { super(xdr); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java index c86f051ece4..758895c588f 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READLINK3Response.java @@ -46,7 +46,7 @@ public class READLINK3Response extends NFS3Response { out.writeBoolean(true); // Attribute follows postOpSymlinkAttr.serialize(out); if (getStatus() == Nfs3Status.NFS3_OK) { - out.writeFixedOpaque(path, path.length); + out.writeVariableOpaque(path); } return out; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java index 76b937b1e56..a1d245c8eac 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/SYMLINK3Response.java @@ -60,9 +60,9 @@ public class SYMLINK3Response 
extends NFS3Response { if (this.getStatus() == Nfs3Status.NFS3_OK) { out.writeBoolean(true); objFileHandle.serialize(out); + out.writeBoolean(true); objPostOpAttr.serialize(out); } - out.writeBoolean(true); dirWcc.serialize(out); return out; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java index 592106f5aa7..c8509eff6ac 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java @@ -49,7 +49,7 @@ public class Nfs3Utils { public static HdfsFileStatus getFileStatus(DFSClient client, String fileIdPath) throws IOException { - return client.getFileInfo(fileIdPath); + return client.getFileLinkInfo(fileIdPath); } public static Nfs3FileAttributes getNfs3FileAttrFromFileStatus( @@ -59,7 +59,10 @@ public class Nfs3Utils { * client takes only the lower 32bit of the fileId and treats it as signed * int. When the 32th bit is 1, the client considers it invalid. */ - return new Nfs3FileAttributes(fs.isDir(), fs.getChildrenNum(), fs + NfsFileType fileType = fs.isDir() ? NfsFileType.NFSDIR : NfsFileType.NFSREG; + fileType = fs.isSymlink() ? NfsFileType.NFSLNK : fileType; + + return new Nfs3FileAttributes(fileType, fs.getChildrenNum(), fs .getPermission().toShort(), iug.getUidAllowingUnknown(fs.getOwner()), iug.getGidAllowingUnknown(fs.getGroup()), fs.getLen(), 0 /* fsid */, fs.getFileId(), fs.getModificationTime(), fs.getAccessTime()); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index b935119b6be..b6a9b988097 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.nfs.nfs3; import java.io.File; +import java.io.FileNotFoundException; import java.io.IOException; import java.net.InetAddress; import java.nio.ByteBuffer; @@ -44,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.nfs.AccessPrivilege; import org.apache.hadoop.nfs.NfsExports; +import org.apache.hadoop.nfs.NfsFileType; import org.apache.hadoop.nfs.NfsTime; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.IdUserGroup; @@ -65,10 +67,12 @@ import org.apache.hadoop.nfs.nfs3.request.PATHCONF3Request; import org.apache.hadoop.nfs.nfs3.request.READ3Request; import org.apache.hadoop.nfs.nfs3.request.READDIR3Request; import org.apache.hadoop.nfs.nfs3.request.READDIRPLUS3Request; +import org.apache.hadoop.nfs.nfs3.request.READLINK3Request; import org.apache.hadoop.nfs.nfs3.request.REMOVE3Request; import org.apache.hadoop.nfs.nfs3.request.RENAME3Request; import org.apache.hadoop.nfs.nfs3.request.RMDIR3Request; import org.apache.hadoop.nfs.nfs3.request.SETATTR3Request; +import org.apache.hadoop.nfs.nfs3.request.SYMLINK3Request; import org.apache.hadoop.nfs.nfs3.request.SetAttr3; import org.apache.hadoop.nfs.nfs3.request.SetAttr3.SetAttrField; import org.apache.hadoop.nfs.nfs3.request.WRITE3Request; @@ -476,9 +480,70 @@ public class RpcProgramNfs3 
extends RpcProgram implements Nfs3Interface { } } - public READLINK3Response readlink(XDR xdr, - SecurityHandler securityHandler, InetAddress client) { - return new READLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); + public READLINK3Response readlink(XDR xdr, SecurityHandler securityHandler, + InetAddress client) { + READLINK3Response response = new READLINK3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); + if (dfsClient == null) { + response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); + return response; + } + + READLINK3Request request = null; + + try { + request = new READLINK3Request(xdr); + } catch (IOException e) { + LOG.error("Invalid READLINK request"); + return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL); + } + + FileHandle handle = request.getHandle(); + if (LOG.isDebugEnabled()) { + LOG.debug("NFS READLINK fileId: " + handle.getFileId()); + } + + String fileIdPath = Nfs3Utils.getFileIdPath(handle); + try { + String target = dfsClient.getLinkTarget(fileIdPath); + + Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient, + fileIdPath, iug); + if (postOpAttr == null) { + LOG.info("Can't get path for fileId:" + handle.getFileId()); + return new READLINK3Response(Nfs3Status.NFS3ERR_STALE); + } + if (postOpAttr.getType() != NfsFileType.NFSLNK.toValue()) { + LOG.error("Not a symlink, fileId:" + handle.getFileId()); + return new READLINK3Response(Nfs3Status.NFS3ERR_INVAL); + } + if (target == null) { + LOG.error("Symlink target should not be null, fileId:" + + handle.getFileId()); + return new READLINK3Response(Nfs3Status.NFS3ERR_SERVERFAULT); + } + if (MAX_READ_TRANSFER_SIZE < target.getBytes().length) { + return new READLINK3Response(Nfs3Status.NFS3ERR_IO, postOpAttr, null); + } + + return new READLINK3Response(Nfs3Status.NFS3_OK, postOpAttr, + target.getBytes()); + + } catch (IOException e) { + LOG.warn("Readlink error: " + e.getClass(), e); + if (e instanceof FileNotFoundException) { + return new READLINK3Response(Nfs3Status.NFS3ERR_STALE); + } else if (e instanceof AccessControlException) { + return new READLINK3Response(Nfs3Status.NFS3ERR_ACCES); + } + return new READLINK3Response(Nfs3Status.NFS3ERR_IO); + } } @Override @@ -1121,9 +1186,63 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public SYMLINK3Response symlink(XDR xdr, - SecurityHandler securityHandler, InetAddress client) { - return new SYMLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); + public SYMLINK3Response symlink(XDR xdr, SecurityHandler securityHandler, + InetAddress client) { + SYMLINK3Response response = new SYMLINK3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); + if (dfsClient == null) { + response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); + return response; + } + + SYMLINK3Request request = null; + try { + request = new SYMLINK3Request(xdr); + } catch (IOException e) { + LOG.error("Invalid SYMLINK request"); + response.setStatus(Nfs3Status.NFS3ERR_INVAL); + return response; + } + + FileHandle dirHandle = request.getHandle(); + String name = request.getName(); + String symData = request.getSymData(); + String linkDirIdPath = Nfs3Utils.getFileIdPath(dirHandle); + // Don't do any name check 
to source path, just leave it to HDFS + String linkIdPath = linkDirIdPath + "/" + name; + if (LOG.isDebugEnabled()) { + LOG.debug("NFS SYMLINK, target: " + symData + " link: " + linkIdPath); + } + + try { + WccData dirWcc = response.getDirWcc(); + WccAttr preOpAttr = Nfs3Utils.getWccAttr(dfsClient, linkDirIdPath); + dirWcc.setPreOpAttr(preOpAttr); + + dfsClient.createSymlink(symData, linkIdPath, false); + // Set symlink attr is considered as to change the attr of the target + // file. So no need to set symlink attr here after it's created. + + HdfsFileStatus linkstat = dfsClient.getFileLinkInfo(linkIdPath); + Nfs3FileAttributes objAttr = Nfs3Utils.getNfs3FileAttrFromFileStatus( + linkstat, iug); + dirWcc + .setPostOpAttr(Nfs3Utils.getFileAttr(dfsClient, linkDirIdPath, iug)); + + return new SYMLINK3Response(Nfs3Status.NFS3_OK, new FileHandle( + objAttr.getFileid()), objAttr, dirWcc); + + } catch (IOException e) { + LOG.warn("Exception:" + e); + response.setStatus(Nfs3Status.NFS3ERR_IO); + return response; + } } public READDIR3Response link(XDR xdr, SecurityHandler securityHandler, InetAddress client) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0627b0ea56e..9c79d8dcb70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -333,6 +333,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5085. Refactor o.a.h.nfs to support different types of authentications. (jing9) + HDFS-5067 Support symlink operations in NFS gateway. (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 965ce2041a7adfbbe5fc8b77a85c0d1179ef20df Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 13 Sep 2013 04:29:50 +0000 Subject: [PATCH 13/16] HDFS-5192. NameNode may fail to start when dfs.client.test.drop.namenode.response.number is set. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522775 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSClient.java | 14 +++-- .../apache/hadoop/hdfs/NameNodeProxies.java | 10 ++-- .../ha/TestLossyRetryInvocationHandler.java | 57 +++++++++++++++++++ 4 files changed, 74 insertions(+), 10 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 9c79d8dcb70..f8ac7928388 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -439,6 +439,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5159. Secondary NameNode fails to checkpoint if error occurs downloading edits on first checkpoint. (atm) + HDFS-5192. NameNode may fail to start when + dfs.client.test.drop.namenode.response.number is set. 
(jing9) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index d1cc7848916..23e4ec01052 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -484,14 +484,17 @@ public class DFSClient implements java.io.Closeable { int numResponseToDrop = conf.getInt( DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT); + NameNodeProxies.ProxyAndInfo proxyInfo = null; if (numResponseToDrop > 0) { // This case is used for testing. LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + " is set to " + numResponseToDrop + ", this hacked client will proactively drop responses"); - NameNodeProxies.ProxyAndInfo proxyInfo = NameNodeProxies - .createProxyWithLossyRetryHandler(conf, nameNodeUri, - ClientProtocol.class, numResponseToDrop); + proxyInfo = NameNodeProxies.createProxyWithLossyRetryHandler(conf, + nameNodeUri, ClientProtocol.class, numResponseToDrop); + } + + if (proxyInfo != null) { this.dtService = proxyInfo.getDelegationTokenService(); this.namenode = proxyInfo.getProxy(); } else if (rpcNamenode != null) { @@ -502,9 +505,8 @@ public class DFSClient implements java.io.Closeable { } else { Preconditions.checkArgument(nameNodeUri != null, "null URI"); - NameNodeProxies.ProxyAndInfo proxyInfo = - NameNodeProxies.createProxy(conf, nameNodeUri, ClientProtocol.class); - + proxyInfo = NameNodeProxies.createProxy(conf, nameNodeUri, + ClientProtocol.class); this.dtService = proxyInfo.getDelegationTokenService(); this.namenode = proxyInfo.getProxy(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java index 41dac6a80f6..bff0284c74f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java @@ -158,8 +158,8 @@ public class NameNodeProxies { * Generate a dummy namenode proxy instance that utilizes our hacked * {@link LossyRetryInvocationHandler}. Proxy instance generated using this * method will proactively drop RPC responses. Currently this method only - * support HA setup. IllegalStateException will be thrown if the given - * configuration is not for HA. + * support HA setup. null will be returned if the given configuration is not + * for HA. * * @param config the configuration containing the required IPC * properties, client failover configurations, etc. @@ -168,7 +168,8 @@ public class NameNodeProxies { * @param xface the IPC interface which should be created * @param numResponseToDrop The number of responses to drop for each RPC call * @return an object containing both the proxy and the associated - * delegation token service it corresponds to + * delegation token service it corresponds to. Will return null of the + * given configuration does not support HA. 
* @throws IOException if there is an error creating the proxy */ @SuppressWarnings("unchecked") @@ -204,8 +205,9 @@ public class NameNodeProxies { Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri); return new ProxyAndInfo(proxy, dtService); } else { - throw new IllegalStateException("Currently creating proxy using " + + LOG.warn("Currently creating proxy using " + "LossyRetryInvocationHandler requires NN HA setup"); + return null; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java new file mode 100644 index 00000000000..9434392ccb8 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestLossyRetryInvocationHandler.java @@ -0,0 +1,57 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.ha; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.junit.Test; + +/** + * This test makes sure that when + * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} is set, + * DFSClient instances can still be created within NN/DN (e.g., the fs instance + * used by the trash emptier thread in NN) + */ +public class TestLossyRetryInvocationHandler { + + @Test + public void testStartNNWithTrashEmptier() throws Exception { + MiniDFSCluster cluster = null; + Configuration conf = new HdfsConfiguration(); + + // enable both trash emptier and dropping response + conf.setLong("fs.trash.interval", 360); + conf.setInt(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, 2); + + try { + cluster = new MiniDFSCluster.Builder(conf) + .nnTopology(MiniDFSNNTopology.simpleHATopology()).numDataNodes(0) + .build(); + cluster.waitActive(); + cluster.transitionToActive(0); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } + +} From 4857e76579368a3af3b713034c8ea67176ff6356 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Fri, 13 Sep 2013 11:04:16 +0000 Subject: [PATCH 14/16] HADOOP-9350. 
Hadoop not building against Java7 on OSX

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522867 13f79535-47bb-0310-9956-ffa450edef68
---
 BUILDING.txt                                     |  6 +++++-
 hadoop-common-project/hadoop-annotations/pom.xml | 15 +++++++++++++++
 hadoop-common-project/hadoop-common/CHANGES.txt  |  3 +++
 3 files changed, 23 insertions(+), 1 deletion(-)

diff --git a/BUILDING.txt b/BUILDING.txt
index ff6dea26ad4..c9415dcedef 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -153,9 +153,13 @@ Building on OS/X
 
 ----------------------------------------------------------------------------------
 
-Hadoop does not build on OS/X with Java 7.
+A one-time manual step is required to enable building Hadoop OS X with Java 7
+every time the JDK is updated.
 see: https://issues.apache.org/jira/browse/HADOOP-9350
 
+$ sudo mkdir `/usr/libexec/java_home`/Classes
+$ sudo ln -s `/usr/libexec/java_home`/lib/tools.jar `/usr/libexec/java_home`/Classes/classes.jar
+
 ----------------------------------------------------------------------------------
 Building on Windows
 
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index ea1599b8733..4428cacd290 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -56,6 +56,21 @@
         </dependency>
       </dependencies>
     </profile>
+    <profile>
+      <id>jdk1.7</id>
+      <activation>
+        <jdk>1.7</jdk>
+      </activation>
+      <dependencies>
+        <dependency>
+          <groupId>jdk.tools</groupId>
+          <artifactId>jdk.tools</artifactId>
+          <version>1.7</version>
+          <scope>system</scope>
+          <systemPath>${java.home}/../lib/tools.jar</systemPath>
+        </dependency>
+      </dependencies>
+    </profile>
   </profiles>
 
 </project>
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt
index 95da899553a..85000b639f0 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -363,6 +363,9 @@ Release 2.3.0 - UNRELEASED
 
     HADOOP-9908. Fix NPE when versioninfo properties file is missing (todd)
 
+    HADOOP-9350. Hadoop not building against Java7 on OSX
+    (Robert Kanter via stevel)
+
 Release 2.1.1-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES

From 2950c9ff6e309ec1c9cf803a1f55f7a01197cd67 Mon Sep 17 00:00:00 2001
From: Jason Darrell Lowe
Date: Fri, 13 Sep 2013 15:14:07 +0000
Subject: [PATCH 15/16] YARN-1194. TestContainerLogsPage fails with native builds. Contributed by Roman Shaposhnik

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522968 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-yarn-project/CHANGES.txt                   |  3 +++
 .../nodemanager/webapp/TestContainerLogsPage.java | 13 +++++++++++--
 2 files changed, 14 insertions(+), 2 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 266faa54754..df573f52cfc 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -191,6 +191,9 @@ Release 2.1.1-beta - UNRELEASED
     YARN-1078. TestNodeManagerResync, TestNodeManagerShutdown, and
     TestNodeStatusUpdater fail on Windows. (Chuan Liu via cnauroth)
 
+    YARN-1194.
TestContainerLogsPage fails with native builds (Roman Shaposhnik + via jlowe) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java index edd7cfd6610..6c21ff72cb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java @@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; @@ -50,6 +51,7 @@ import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState; import org.apache.hadoop.yarn.server.nodemanager.webapp.ContainerLogsPage.ContainersLogsBlock; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.webapp.YarnWebParams; @@ -151,8 +153,15 @@ public class TestContainerLogsPage { new ConcurrentHashMap(); appMap.put(appId, app); when(context.getApplications()).thenReturn(appMap); - when(context.getContainers()).thenReturn( - new ConcurrentHashMap()); + ConcurrentHashMap containers = + new ConcurrentHashMap(); + when(context.getContainers()).thenReturn(containers); + when(context.getLocalDirsHandler()).thenReturn(dirsHandler); + + MockContainer container = new MockContainer(appAttemptId, + new AsyncDispatcher(), conf, user, appId, 1); + container.setState(ContainerState.RUNNING); + context.getContainers().put(container1, container); ContainersLogsBlock cLogsBlock = new ContainersLogsBlock(context); From 1260d598d352a00e51b27d44e6c77e73fa18b1a4 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 13 Sep 2013 20:41:11 +0000 Subject: [PATCH 16/16] HDFS-4096. Add snapshot information to namenode WebUI. Contributed by Haohui Mai. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1523091 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../hdfs/server/namenode/FSNamesystem.java  |  9 +++
 .../server/namenode/NamenodeJspHelper.java  | 11 +++
 .../namenode/metrics/FSNamesystemMBean.java |  5 ++
 .../src/main/webapps/hdfs/dfshealth.jsp     |  5 +-
 .../namenode/TestFSNamesystemMBean.java     | 72 +++++++++++++++++++
 6 files changed, 104 insertions(+), 1 deletion(-)
 create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f8ac7928388..7dd9a126d75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -272,6 +272,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs
     (Todd Lipcon via Colin Patrick McCabe)
 
+    HDFS-4096. Add snapshot information to namenode WebUI. (Haohui Mai via
+    jing9)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index dc733122c68..f4f8ccedec0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4199,6 +4199,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return this.snapshotManager.getNumSnapshots();
   }
 
+  @Override
+  public String getSnapshotStats() {
+    Map<String, Object> info = new HashMap<String, Object>();
+    info.put("SnapshottableDirectories", this.getNumSnapshottableDirs());
+    info.put("Snapshots", this.getNumSnapshots());
+    return JSON.toString(info);
+  }
+
+
   int getNumberOfDatanodes(DatanodeReportType type) {
     readLock();
     try {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
index 36163c7d0bf..e2aa4fcf7c5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
@@ -203,6 +203,17 @@ class NamenodeJspHelper {
     return "";
   }
 
+  static void generateSnapshotReport(JspWriter out, FSNamesystem fsn)
+      throws IOException {
+    out.println("<div id=\"snapshotstats\"><div class=\"dfstable\">"
+        + "<table class=\"storage\" title=\"Snapshot Summary\">\n"
+        + "<thead><tr><td><b>Snapshottable directories</b></td>"
+        + "<td><b>Snapshotted directories</b></td></tr></thead>");
+
+    out.println(String.format("<td>%d</td><td>%d</td>", fsn.getNumSnapshottableDirs(), fsn.getNumSnapshots()));
+    out.println("</table></div></div>");
+  }
+
   static class HealthJsp {
     private int rowNum = 0;
     private int colNum = 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
index abc65ab51e9..4835fd86be3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java
@@ -130,4 +130,9 @@ public interface FSNamesystemMBean {
    * @return number of decommissioned dead data nodes
    */
   public int getNumDecomDeadDataNodes();
+
+  /**
+   * The statistics of snapshots
+   */
+  public String getSnapshotStats();
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
index 0b0091e10d6..36f6199d978 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp
@@ -73,7 +73,10 @@
 <% healthjsp.generateJournalReport(out, nn, request); %>
 <hr/>
 <% healthjsp.generateConfReport(out, nn, request); %>
-<hr>
+<hr/>
+<h3>Snapshot Summary</h3>
+<% NamenodeJspHelper.generateSnapshotReport(out, fsn); %>
+<hr/>
 <h3>Startup Progress</h3>
<% healthjsp.generateStartupProgress(out, nn.getStartupProgress()); %> <% diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java new file mode 100644 index 00000000000..dcfd83ded68 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java @@ -0,0 +1,72 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.junit.Assert.assertTrue; + +import java.lang.management.ManagementFactory; +import java.util.Map; + +import javax.management.MBeanServer; +import javax.management.ObjectName; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.junit.Test; +import org.mortbay.util.ajax.JSON; + +/** + * Class for testing {@link NameNodeMXBean} implementation + */ +public class TestFSNamesystemMBean { + + @Test + public void test() throws Exception { + Configuration conf = new Configuration(); + MiniDFSCluster cluster = null; + + try { + cluster = new MiniDFSCluster.Builder(conf).build(); + cluster.waitActive(); + + FSNamesystem fsn = cluster.getNameNode().namesystem; + + MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + ObjectName mxbeanName = new ObjectName( + "Hadoop:service=NameNode,name=FSNamesystemState"); + + String snapshotStats = (String) (mbs.getAttribute(mxbeanName, + "SnapshotStats")); + + @SuppressWarnings("unchecked") + Map stat = (Map) JSON + .parse(snapshotStats); + + assertTrue(stat.containsKey("SnapshottableDirectories") + && (Long) stat.get("SnapshottableDirectories") == fsn + .getNumSnapshottableDirs()); + assertTrue(stat.containsKey("Snapshots") + && (Long) stat.get("Snapshots") == fsn.getNumSnapshots()); + + } finally { + if (cluster != null) { + cluster.shutdown(); + } + } + } +}
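As an aside for readers of PATCH 16: the TestFSNamesystemMBean test above already shows the read path for the new metric, since FSNamesystem#getSnapshotStats serializes the two counters into a JSON string exposed as the SnapshotStats attribute of the Hadoop:service=NameNode,name=FSNamesystemState MBean. The sketch below is illustrative only and is not part of the patch series; it assumes it runs inside the NameNode's JVM (for example from a test that starts a MiniDFSCluster, as the test above does), and the class name SnapshotStatsProbe and its println output are hypothetical.

import java.lang.management.ManagementFactory;
import java.util.Map;

import javax.management.MBeanServer;
import javax.management.ObjectName;

import org.mortbay.util.ajax.JSON;

/** Hypothetical in-process probe for the SnapshotStats attribute added by HDFS-4096. */
public class SnapshotStatsProbe {

  public static void main(String[] args) throws Exception {
    // The platform MBeanServer only sees MBeans registered in this JVM, so this
    // code has to run inside the NameNode process (for example from a test that
    // starts a MiniDFSCluster, as TestFSNamesystemMBean does above).
    MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
    ObjectName fsState = new ObjectName(
        "Hadoop:service=NameNode,name=FSNamesystemState");

    // SnapshotStats is a JSON map such as {"SnapshottableDirectories":2,"Snapshots":5}.
    String snapshotStats = (String) mbs.getAttribute(fsState, "SnapshotStats");

    @SuppressWarnings("unchecked")
    Map<String, Object> stat = (Map<String, Object>) JSON.parse(snapshotStats);

    System.out.println("SnapshottableDirectories = "
        + stat.get("SnapshottableDirectories"));
    System.out.println("Snapshots = " + stat.get("Snapshots"));
  }
}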