From b32ace11f1fe4540767ee69f74e321977a9ae37a Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Thu, 15 Aug 2013 00:45:48 +0000 Subject: [PATCH 001/153] HDFS-5051. nn fails to download checkpointed image from snn in some setups. Contributed by Vinay and Suresh Srinivas. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514110 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/namenode/GetImageServlet.java | 20 +++++++++++++------ 2 files changed, 17 insertions(+), 6 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5f2fb795a39..66eab12040d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -315,6 +315,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5091. Support for spnego keytab separate from the JournalNode keytab for secure HA. (jing9) + HDFS-5051. nn fails to download checkpointed image from snn in some + setups. (Vinay and suresh via suresh) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java index dfe1c6af69a..35c3cfb2884 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java @@ -310,11 +310,14 @@ public class GetImageServlet extends HttpServlet { static String getParamStringToPutImage(long txid, InetSocketAddress imageListenAddress, Storage storage) { - + String machine = !imageListenAddress.isUnresolved() + && imageListenAddress.getAddress().isAnyLocalAddress() ? null + : imageListenAddress.getHostName(); return "putimage=1" + "&" + TXID_PARAM + "=" + txid + "&port=" + imageListenAddress.getPort() + - "&" + STORAGEINFO_PARAM + "=" + + (machine != null ? "&machine=" + machine : "") + + "&" + STORAGEINFO_PARAM + "=" + storage.toColonSeparatedString(); } @@ -341,10 +344,6 @@ public class GetImageServlet extends HttpServlet { Map pmap = request.getParameterMap(); isGetImage = isGetEdit = isPutImage = fetchLatest = false; remoteport = 0; - machineName = request.getRemoteHost(); - if (InetAddresses.isInetAddress(machineName)) { - machineName = NetUtils.getHostNameOfIP(machineName); - } for (Map.Entry entry : pmap.entrySet()) { String key = entry.getKey(); @@ -369,11 +368,20 @@ public class GetImageServlet extends HttpServlet { txId = ServletUtil.parseLongParam(request, TXID_PARAM); } else if (key.equals("port")) { remoteport = new Integer(val[0]).intValue(); + } else if (key.equals("machine")) { + machineName = val[0]; } else if (key.equals(STORAGEINFO_PARAM)) { storageInfoString = val[0]; } } + if (machineName == null) { + machineName = request.getRemoteHost(); + if (InetAddresses.isInetAddress(machineName)) { + machineName = NetUtils.getHostNameOfIP(machineName); + } + } + int numGets = (isGetImage?1:0) + (isGetEdit?1:0); if ((numGets > 1) || (numGets == 0) && !isPutImage) { throw new IOException("Illegal parameters to TransferFsImage"); From 9cf82b6a7b5742802c451e53af7ec718f74ee58f Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Thu, 15 Aug 2013 01:12:21 +0000 Subject: [PATCH 002/153] HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options. 
Contributed by Konstantin Shvachko. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514114 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../namenode/NNThroughputBenchmark.java | 72 +++++++++++++------ 2 files changed, 55 insertions(+), 20 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 66eab12040d..c6f160d263f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -256,6 +256,9 @@ Release 2.3.0 - UNRELEASED HDFS-4817. Make HDFS advisory caching configurable on a per-file basis. (Colin Patrick McCabe) + HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options. + (shv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java index 77f8560816f..3156de4e93a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java @@ -61,6 +61,8 @@ import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.security.Groups; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; +import org.apache.hadoop.util.Tool; +import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.util.VersionInfo; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -100,7 +102,7 @@ import org.apache.log4j.LogManager; * Then the benchmark executes the specified number of operations using * the specified number of threads and outputs the resulting stats. */ -public class NNThroughputBenchmark { +public class NNThroughputBenchmark implements Tool { private static final Log LOG = LogFactory.getLog(NNThroughputBenchmark.class); private static final int BLOCK_SIZE = 16; private static final String GENERAL_OPTIONS_USAGE = @@ -115,6 +117,8 @@ public class NNThroughputBenchmark { // We do not need many handlers, since each thread simulates a handler // by calling name-node methods directly config.setInt(DFSConfigKeys.DFS_DATANODE_HANDLER_COUNT_KEY, 1); + // Turn off minimum block size verification + config.setInt(DFSConfigKeys.DFS_NAMENODE_MIN_BLOCK_SIZE_KEY, 0); // set exclude file config.set(DFSConfigKeys.DFS_HOSTS_EXCLUDE, "${hadoop.tmp.dir}/dfs/hosts/exclude"); @@ -129,14 +133,11 @@ public class NNThroughputBenchmark { config.set(DFSConfigKeys.DFS_HOSTS, "${hadoop.tmp.dir}/dfs/hosts/include"); File includeFile = new File(config.get(DFSConfigKeys.DFS_HOSTS, "include")); new FileOutputStream(includeFile).close(); - // Start the NameNode - String[] argv = new String[] {}; - nameNode = NameNode.createNameNode(argv, config); - nameNodeProto = nameNode.getRpcServer(); } void close() { - nameNode.stop(); + if(nameNode != null) + nameNode.stop(); } static void setNameNodeLoggingLevel(Level logLevel) { @@ -1290,52 +1291,69 @@ public class NNThroughputBenchmark { System.exit(-1); } + public static void runBenchmark(Configuration conf, List args) + throws Exception { + NNThroughputBenchmark bench = null; + try { + bench = new NNThroughputBenchmark(conf); + bench.run(args.toArray(new String[]{})); + } finally { + if(bench != null) + bench.close(); + } + } + /** * Main method of the benchmark. 
* @param args command line parameters */ - public static void runBenchmark(Configuration conf, List args) throws Exception { + @Override // Tool + public int run(String[] aArgs) throws Exception { + List args = new ArrayList(Arrays.asList(aArgs)); if(args.size() < 2 || ! args.get(0).startsWith("-op")) printUsage(); String type = args.get(1); boolean runAll = OperationStatsBase.OP_ALL_NAME.equals(type); - NNThroughputBenchmark bench = null; + // Start the NameNode + String[] argv = new String[] {}; + nameNode = NameNode.createNameNode(argv, config); + nameNodeProto = nameNode.getRpcServer(); + List ops = new ArrayList(); OperationStatsBase opStat = null; try { - bench = new NNThroughputBenchmark(conf); if(runAll || CreateFileStats.OP_CREATE_NAME.equals(type)) { - opStat = bench.new CreateFileStats(args); + opStat = new CreateFileStats(args); ops.add(opStat); } if(runAll || OpenFileStats.OP_OPEN_NAME.equals(type)) { - opStat = bench.new OpenFileStats(args); + opStat = new OpenFileStats(args); ops.add(opStat); } if(runAll || DeleteFileStats.OP_DELETE_NAME.equals(type)) { - opStat = bench.new DeleteFileStats(args); + opStat = new DeleteFileStats(args); ops.add(opStat); } if(runAll || FileStatusStats.OP_FILE_STATUS_NAME.equals(type)) { - opStat = bench.new FileStatusStats(args); + opStat = new FileStatusStats(args); ops.add(opStat); } if(runAll || RenameFileStats.OP_RENAME_NAME.equals(type)) { - opStat = bench.new RenameFileStats(args); + opStat = new RenameFileStats(args); ops.add(opStat); } if(runAll || BlockReportStats.OP_BLOCK_REPORT_NAME.equals(type)) { - opStat = bench.new BlockReportStats(args); + opStat = new BlockReportStats(args); ops.add(opStat); } if(runAll || ReplicationStats.OP_REPLICATION_NAME.equals(type)) { - opStat = bench.new ReplicationStats(args); + opStat = new ReplicationStats(args); ops.add(opStat); } if(runAll || CleanAllStats.OP_CLEAN_NAME.equals(type)) { - opStat = bench.new CleanAllStats(args); + opStat = new CleanAllStats(args); ops.add(opStat); } if(ops.size() == 0) @@ -1354,14 +1372,28 @@ public class NNThroughputBenchmark { } catch(Exception e) { LOG.error(StringUtils.stringifyException(e)); throw e; + } + return 0; + } + + public static void main(String[] args) throws Exception { + NNThroughputBenchmark bench = null; + try { + bench = new NNThroughputBenchmark(new HdfsConfiguration()); + ToolRunner.run(bench, args); } finally { if(bench != null) bench.close(); } } - public static void main(String[] args) throws Exception { - runBenchmark(new HdfsConfiguration(), - new ArrayList(Arrays.asList(args))); + @Override // Configurable + public void setConf(Configuration conf) { + config = conf; + } + + @Override // Configurable + public Configuration getConf() { + return config; } } From 4c8db6009291001b685b63f05b59a084972df8d4 Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Thu, 15 Aug 2013 01:29:16 +0000 Subject: [PATCH 003/153] HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from DatanodeProtocolProtos. Contributed by Tao Luo. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514118 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java | 5 +++-- .../hadoop-hdfs/src/main/proto/DatanodeProtocol.proto | 7 ++----- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c6f160d263f..4d62131b8cb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -6,6 +6,9 @@ Trunk (Unreleased) HDFS-3034. Remove the deprecated DFSOutputStream.sync() method. (szetszwo) + HDFS-5079. Cleaning up NNHAStatusHeartbeat.State from + DatanodeProtocolProtos. (Tao Luo via shv) + NEW FEATURES HDFS-3125. Add JournalService to enable Journal Daemon. (suresh) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java index 613edb1fa18..a3b60a765df 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FsServerDefaults; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState; +import org.apache.hadoop.ha.proto.HAServiceProtocolProtos; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdfs.protocol.Block; import org.apache.hadoop.hdfs.protocol.ClientProtocol; @@ -1311,10 +1312,10 @@ public class PBHelper { NNHAStatusHeartbeatProto.newBuilder(); switch (hb.getState()) { case ACTIVE: - builder.setState(NNHAStatusHeartbeatProto.State.ACTIVE); + builder.setState(HAServiceProtocolProtos.HAServiceStateProto.ACTIVE); break; case STANDBY: - builder.setState(NNHAStatusHeartbeatProto.State.STANDBY); + builder.setState(HAServiceProtocolProtos.HAServiceStateProto.STANDBY); break; default: throw new IllegalArgumentException("Unexpected NNHAStatusHeartbeat.State:" + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto index 3b9b90b5d27..bc5461be567 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/DatanodeProtocol.proto @@ -31,6 +31,7 @@ option java_generic_services = true; option java_generate_equals_and_hash = true; package hadoop.hdfs; +import "HAServiceProtocol.proto"; import "hdfs.proto"; /** @@ -185,11 +186,7 @@ message StorageReportProto { * txid - Highest transaction ID this NN has seen */ message NNHAStatusHeartbeatProto { - enum State { - ACTIVE = 0; - STANDBY = 1; - } - required State state = 1; + required hadoop.common.HAServiceStateProto state = 1; required uint64 txid = 2; } From 472d8bc9839b9cbe262e41743f40b0cb4912f5f2 Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Thu, 15 Aug 2013 02:35:48 +0000 Subject: [PATCH 004/153] YARN-1056. Remove dual use of string 'resourcemanager' in yarn.resourcemanager.connect.{max.wait.secs|retry_interval.secs}. Contributed by Karthik Kambatla. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514135 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 +++ .../hadoop/yarn/conf/YarnConfiguration.java | 24 +++++++------- .../apache/hadoop/yarn/client/RMProxy.java | 24 +++++--------- .../src/main/resources/yarn-default.xml | 16 ++++++++- .../nodemanager/TestNodeStatusUpdater.java | 33 +++++++++---------- 5 files changed, 55 insertions(+), 46 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 457ec5fdac9..92744c0537b 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -875,6 +875,10 @@ Release 2.1.0-beta - 2013-08-06 YARN-1043. Push all metrics consistently. (Jian He via acmurthy) + YARN-1056. Remove dual use of string 'resourcemanager' in + yarn.resourcemanager.connect.{max.wait.secs|retry_interval.secs} + (Karthik Kambatla via acmurthy) + Release 2.0.5-alpha - 06/06/2013 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 065fb6344c6..ec9eb19c4f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -259,7 +259,7 @@ public class YarnConfiguration extends Configuration { /** URI for FileSystemRMStateStore */ public static final String FS_RM_STATE_STORE_URI = - RM_PREFIX + "fs.rm-state-store.uri"; + RM_PREFIX + "fs.state-store.uri"; /** The maximum number of completed applications RM keeps. */ public static final String RM_MAX_COMPLETED_APPLICATIONS = @@ -655,19 +655,17 @@ public class YarnConfiguration extends Configuration { public static final long DEFAULT_NM_PROCESS_KILL_WAIT_MS = 2000; - /** Max time to wait to establish a connection to RM - */ - public static final String RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS = - RM_PREFIX + "resourcemanager.connect.max.wait.secs"; - public static final int DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS = - 15*60; + /** Max time to wait to establish a connection to RM */ + public static final String RESOURCEMANAGER_CONNECT_MAX_WAIT_MS = + RM_PREFIX + "connect.max-wait.ms"; + public static final int DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS = + 15 * 60 * 1000; - /** Time interval between each attempt to connect to RM - */ - public static final String RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS = - RM_PREFIX + "resourcemanager.connect.retry_interval.secs"; - public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS - = 30; + /** Time interval between each attempt to connect to RM */ + public static final String RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS = + RM_PREFIX + "connect.retry-interval.ms"; + public static final long DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS + = 30 * 1000; /** * CLASSPATH for YARN applications. 
A comma-separated list of CLASSPATH diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java index 3b166a8806c..5fff760eb2d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/client/RMProxy.java @@ -35,14 +35,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryProxy; -import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; -import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.ipc.YarnRPC; -import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; import com.google.common.annotations.VisibleForTesting; @@ -79,38 +75,36 @@ public class RMProxy { public static RetryPolicy createRetryPolicy(Configuration conf) { long rmConnectWaitMS = conf.getInt( - YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS, - YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS) - * 1000; + YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, + YarnConfiguration.DEFAULT_RESOURCEMANAGER_CONNECT_MAX_WAIT_MS); long rmConnectionRetryIntervalMS = conf.getLong( - YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS, + YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, YarnConfiguration - .DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS) - * 1000; + .DEFAULT_RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS); if (rmConnectionRetryIntervalMS < 0) { throw new YarnRuntimeException("Invalid Configuration. " + - YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS + + YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS + " should not be negative."); } - boolean waitForEver = (rmConnectWaitMS == -1000); + boolean waitForEver = (rmConnectWaitMS == -1); if (waitForEver) { return RetryPolicies.RETRY_FOREVER; } else { if (rmConnectWaitMS < 0) { throw new YarnRuntimeException("Invalid Configuration. " - + YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS + + YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS + " can be -1, but can not be other negative numbers"); } // try connect once if (rmConnectWaitMS < rmConnectionRetryIntervalMS) { - LOG.warn(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS + LOG.warn(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS + " is smaller than " - + YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS + + YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS + ". Only try connect once."); rmConnectWaitMS = 0; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index b6753bc4adc..ab8d50aab10 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -140,6 +140,20 @@ 1000 + + Maximum time to wait to establish connection to + ResourceManager. 
+ yarn.resourcemanager.connect.max-wait.ms + 900000 + + + + How often to try connecting to the + ResourceManager. + yarn.resourcemanager.connect.retry-interval.ms + 30000 + + The maximum number of application attempts. It's a global setting for all application masters. Each application master can specify @@ -249,7 +263,7 @@ RM state will be stored. This must be supplied when using org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore as the value for yarn.resourcemanager.store.class - yarn.resourcemanager.fs.rm-state-store.uri + yarn.resourcemanager.fs.state-store.uri ${hadoop.tmp.dir}/yarn/system/rmstore diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 78ab13ea835..d2119a75072 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -957,15 +957,14 @@ public class TestNodeStatusUpdater { @Test (timeout = 150000) public void testNMConnectionToRM() throws Exception { final long delta = 50000; - final long connectionWaitSecs = 5; - final long connectionRetryIntervalSecs = 1; + final long connectionWaitMs = 5000; + final long connectionRetryIntervalMs = 1000; //Waiting for rmStartIntervalMS, RM will be started final long rmStartIntervalMS = 2*1000; - conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS, - connectionWaitSecs); - conf.setLong(YarnConfiguration - .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS, - connectionRetryIntervalSecs); + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, + connectionWaitMs); + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, + connectionRetryIntervalMs); //Test NM try to connect to RM Several times, but finally fail NodeManagerWithCustomNodeStatusUpdater nmWithUpdater; @@ -987,15 +986,15 @@ public class TestNodeStatusUpdater { } catch(Exception e) { long t = System.currentTimeMillis(); long duration = t - waitStartTime; - boolean waitTimeValid = (duration >= connectionWaitSecs * 1000) - && (duration < (connectionWaitSecs * 1000 + delta)); + boolean waitTimeValid = (duration >= connectionWaitMs) + && (duration < (connectionWaitMs + delta)); if(!waitTimeValid) { //either the exception was too early, or it had a different cause. 
//reject with the inner stack trace throw new Exception("NM should have tried re-connecting to RM during " + - "period of at least " + connectionWaitSecs + " seconds, but " + - "stopped retrying within " + (connectionWaitSecs + delta/1000) + - " seconds: " + e, e); + "period of at least " + connectionWaitMs + " ms, but " + + "stopped retrying within " + (connectionWaitMs + delta) + + " ms: " + e, e); } } @@ -1149,14 +1148,14 @@ public class TestNodeStatusUpdater { @Test(timeout = 200000) public void testNodeStatusUpdaterRetryAndNMShutdown() throws Exception { - final long connectionWaitSecs = 1; - final long connectionRetryIntervalSecs = 1; + final long connectionWaitSecs = 1000; + final long connectionRetryIntervalMs = 1000; YarnConfiguration conf = createNMConfig(); - conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_SECS, + conf.setLong(YarnConfiguration.RESOURCEMANAGER_CONNECT_MAX_WAIT_MS, connectionWaitSecs); conf.setLong(YarnConfiguration - .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_SECS, - connectionRetryIntervalSecs); + .RESOURCEMANAGER_CONNECT_RETRY_INTERVAL_MS, + connectionRetryIntervalMs); conf.setLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, 5000); CyclicBarrier syncBarrier = new CyclicBarrier(2); nm = new MyNodeManager2(syncBarrier, conf); From d0b61a169ef5988a77148d1d071e21de9f7bb8e1 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 15 Aug 2013 03:58:23 +0000 Subject: [PATCH 005/153] HADOOP-9875. TestDoAsEffectiveUser can fail on JDK 7. (Aaron T. Myers via Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514147 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/security/TestDoAsEffectiveUser.java | 7 ++++++- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 15000ec72c6..3a5736a1f53 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -334,6 +334,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9652. RawLocalFs#getFileLinkStatus does not fill in the link owner and mode. (Andrew Wang via Colin Patrick McCabe) + HADOOP-9875. TestDoAsEffectiveUser can fail on JDK 7. (Aaron T. 
Myers via + Colin Patrick McCabe) + Release 2.1.1-beta - UNRELEASED diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java index 217174de497..830106dcee2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestDoAsEffectiveUser.java @@ -38,6 +38,7 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.authorize.ProxyUsers; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; +import org.junit.Before; import org.junit.Test; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager; import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier; @@ -58,7 +59,7 @@ public class TestDoAsEffectiveUser { GROUP2_NAME }; private static final String ADDRESS = "0.0.0.0"; private TestProtocol proxy; - private static Configuration masterConf = new Configuration(); + private static final Configuration masterConf = new Configuration(); public static final Log LOG = LogFactory @@ -70,6 +71,10 @@ public class TestDoAsEffectiveUser { "RULE:[2:$1@$0](.*@HADOOP.APACHE.ORG)s/@.*//" + "RULE:[1:$1@$0](.*@HADOOP.APACHE.ORG)s/@.*//" + "DEFAULT"); + } + + @Before + public void setMasterConf() { UserGroupInformation.setConfiguration(masterConf); } From 0182ea16d359b41c065bf9cbf740f8b23f6381e3 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Thu, 15 Aug 2013 04:52:52 +0000 Subject: [PATCH 006/153] HDFS-4898. BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to properly fallback to local rack. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514156 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../BlockPlacementPolicyWithNodeGroup.java | 15 ++++++++------- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4d62131b8cb..9b50809b47a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -324,6 +324,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5051. nn fails to download checkpointed image from snn in some setups. (Vinay and suresh via suresh) + HDFS-4898. BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to + properly fallback to local rack. 
(szetszwo) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java index 643d2b401cd..e98318b9783 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java @@ -169,16 +169,17 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau long blocksize, int maxReplicasPerRack, List results, boolean avoidStaleNodes) throws NotEnoughReplicasException { int oldNumOfReplicas = results.size(); - // randomly choose one node from remote racks + + final String rackLocation = NetworkTopology.getFirstHalf( + localMachine.getNetworkLocation()); try { - chooseRandom( - numOfReplicas, - "~" + NetworkTopology.getFirstHalf(localMachine.getNetworkLocation()), - excludedNodes, blocksize, maxReplicasPerRack, results, - avoidStaleNodes); + // randomly choose from remote racks + chooseRandom(numOfReplicas, "~" + rackLocation, excludedNodes, blocksize, + maxReplicasPerRack, results, avoidStaleNodes); } catch (NotEnoughReplicasException e) { + // fall back to the local rack chooseRandom(numOfReplicas - (results.size() - oldNumOfReplicas), - localMachine.getNetworkLocation(), excludedNodes, blocksize, + rackLocation, excludedNodes, blocksize, maxReplicasPerRack, results, avoidStaleNodes); } } From 97d04ae132135bdba276759b89cfc60851f4e6a0 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 15 Aug 2013 06:03:07 +0000 Subject: [PATCH 007/153] HDFS-4632. globStatus using backslash for escaping does not work on Windows. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514168 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../src/test/java/org/apache/hadoop/fs/TestGlobPaths.java | 3 +++ 2 files changed, 6 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 9b50809b47a..17554e93ed8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -327,6 +327,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-4898. BlockPlacementPolicyWithNodeGroup.chooseRemoteRack() fails to properly fallback to local rack. (szetszwo) + HDFS-4632. globStatus using backslash for escaping does not work on Windows. + (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 5989b9be000..4f5474d2b64 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -466,6 +466,9 @@ public class TestGlobPaths { @Test public void pTestEscape() throws IOException { + // Skip the test case on Windows because backslash will be treated as a + // path separator instead of an escaping character on Windows. 
+ org.junit.Assume.assumeTrue(!Path.WINDOWS); try { String [] files = new String[] {USER_DIR+"/ab\\[c.d"}; Path[] matchedPath = prepareTesting(USER_DIR+"/ab\\[c.d", files); From b776bd46aed2f5b3aa226af36c0081a7d1f69eda Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Thu, 15 Aug 2013 07:20:14 +0000 Subject: [PATCH 008/153] YARN-1045. Improve toString implementation for PBImpls. Contributed by Jian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514185 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 ++ .../impl/pb/AllocateRequestPBImpl.java | 4 +++- .../impl/pb/AllocateResponsePBImpl.java | 4 +++- .../impl/pb/CancelDelegationTokenRequestPBImpl.java | 4 +++- .../impl/pb/CancelDelegationTokenResponsePBImpl.java | 4 +++- .../impl/pb/FinishApplicationMasterRequestPBImpl.java | 4 +++- .../pb/FinishApplicationMasterResponsePBImpl.java | 4 +++- .../impl/pb/GetApplicationReportRequestPBImpl.java | 4 +++- .../impl/pb/GetApplicationReportResponsePBImpl.java | 4 +++- .../impl/pb/GetApplicationsRequestPBImpl.java | 4 +++- .../impl/pb/GetApplicationsResponsePBImpl.java | 4 +++- .../impl/pb/GetClusterMetricsRequestPBImpl.java | 4 +++- .../impl/pb/GetClusterMetricsResponsePBImpl.java | 4 +++- .../impl/pb/GetClusterNodesRequestPBImpl.java | 11 +++++++---- .../impl/pb/GetClusterNodesResponsePBImpl.java | 4 +++- .../impl/pb/GetContainerStatusesRequestPBImpl.java | 5 +++-- .../impl/pb/GetContainerStatusesResponsePBImpl.java | 5 +++-- .../impl/pb/GetDelegationTokenRequestPBImpl.java | 4 +++- .../impl/pb/GetDelegationTokenResponsePBImpl.java | 4 +++- .../impl/pb/GetNewApplicationRequestPBImpl.java | 4 +++- .../impl/pb/GetNewApplicationResponsePBImpl.java | 4 +++- .../impl/pb/GetQueueInfoRequestPBImpl.java | 4 +++- .../impl/pb/GetQueueInfoResponsePBImpl.java | 4 +++- .../impl/pb/GetQueueUserAclsInfoRequestPBImpl.java | 4 +++- .../impl/pb/GetQueueUserAclsInfoResponsePBImpl.java | 4 +++- .../impl/pb/KillApplicationRequestPBImpl.java | 4 +++- .../impl/pb/KillApplicationResponsePBImpl.java | 4 +++- .../pb/RegisterApplicationMasterRequestPBImpl.java | 4 +++- .../pb/RegisterApplicationMasterResponsePBImpl.java | 3 ++- .../impl/pb/RenewDelegationTokenRequestPBImpl.java | 4 +++- .../impl/pb/RenewDelegationTokenResponsePBImpl.java | 4 +++- .../impl/pb/StartContainerRequestPBImpl.java | 4 +++- .../impl/pb/StartContainersResponsePBImpl.java | 4 ++-- .../impl/pb/StopContainersRequestPBImpl.java | 5 +++-- .../impl/pb/StopContainersResponsePBImpl.java | 5 +++-- .../impl/pb/SubmitApplicationRequestPBImpl.java | 4 +++- .../impl/pb/SubmitApplicationResponsePBImpl.java | 4 +++- .../api/protocolrecords/impl/pb/package-info.java | 1 + .../api/records/impl/pb/ApplicationReportPBImpl.java | 4 +++- .../impl/pb/ApplicationResourceUsageReportPBImpl.java | 4 +++- .../impl/pb/ApplicationSubmissionContextPBImpl.java | 4 +++- .../records/impl/pb/ContainerLaunchContextPBImpl.java | 3 ++- .../api/records/impl/pb/ContainerStatusPBImpl.java | 4 +++- .../yarn/api/records/impl/pb/LocalResourcePBImpl.java | 4 +++- .../yarn/api/records/impl/pb/NodeReportPBImpl.java | 4 +++- .../records/impl/pb/PreemptionContainerPBImpl.java | 4 +++- .../api/records/impl/pb/PreemptionContractPBImpl.java | 4 +++- .../api/records/impl/pb/PreemptionMessagePBImpl.java | 4 +++- .../impl/pb/PreemptionResourceRequestPBImpl.java | 4 +++- .../hadoop/yarn/api/records/impl/pb/ProtoBase.java | 3 ++- .../yarn/api/records/impl/pb/QueueInfoPBImpl.java | 4 +++- .../api/records/impl/pb/QueueUserACLInfoPBImpl.java | 4 +++- 
.../impl/pb/StrictPreemptionContractPBImpl.java | 4 +++- .../hadoop/yarn/api/records/impl/pb/URLPBImpl.java | 4 +++- .../api/records/impl/pb/YarnClusterMetricsPBImpl.java | 4 +++- .../hadoop/yarn/api/records/impl/pb/package-info.java | 1 + .../impl/pb/RefreshAdminAclsRequestPBImpl.java | 4 +++- .../impl/pb/RefreshAdminAclsResponsePBImpl.java | 4 +++- .../impl/pb/RefreshNodesRequestPBImpl.java | 4 +++- .../impl/pb/RefreshNodesResponsePBImpl.java | 4 +++- .../impl/pb/RefreshQueuesRequestPBImpl.java | 4 +++- .../impl/pb/RefreshQueuesResponsePBImpl.java | 4 +++- .../impl/pb/RefreshServiceAclsRequestPBImpl.java | 4 +++- .../impl/pb/RefreshServiceAclsResponsePBImpl.java | 4 +++- ...reshSuperUserGroupsConfigurationRequestPBImpl.java | 4 +++- ...eshSuperUserGroupsConfigurationResponsePBImpl.java | 4 +++- .../pb/RefreshUserToGroupsMappingsRequestPBImpl.java | 4 +++- .../pb/RefreshUserToGroupsMappingsResponsePBImpl.java | 4 +++- .../api/records/impl/pb/NodeHealthStatusPBImpl.java | 4 +++- 69 files changed, 202 insertions(+), 74 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 92744c0537b..51b03ef53cc 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -42,6 +42,8 @@ Release 2.1.1-beta - UNRELEASED IMPROVEMENTS YARN-589. Expose a REST API for monitoring the fair scheduler (Sandy Ryza). + + YARN-1045. Improve toString implementation for PBImpls. (Jian He via sseth) OPTIMIZATIONS diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java index 723ab5cf6cd..bff252f38a6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateRequestPBImpl.java @@ -38,6 +38,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class AllocateRequestPBImpl extends AllocateRequest { @@ -83,7 +85,7 @@ public class AllocateRequestPBImpl extends AllocateRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java index f38f91e064b..37d59713670 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/AllocateResponsePBImpl.java @@ -49,6 +49,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProto; import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.AllocateResponseProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnServiceProtos.NMTokenProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class AllocateResponsePBImpl extends AllocateResponse { @@ -99,7 +101,7 @@ public class AllocateResponsePBImpl extends AllocateResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private synchronized void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java index e8e926beb4a..d44578418a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; +import com.google.protobuf.TextFormat; + @Private @Unstable public class CancelDelegationTokenRequestPBImpl extends @@ -90,7 +92,7 @@ public class CancelDelegationTokenRequestPBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java index 334e1edd8ea..ec2b2b2081b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java @@ -22,6 +22,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class CancelDelegationTokenResponsePBImpl extends CancelDelegationTokenResponse { @@ -58,6 +60,6 @@ public class CancelDelegationTokenResponsePBImpl extends CancelDelegationTokenRe @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java index 233c3114834..2805f82e2e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class FinishApplicationMasterRequestPBImpl extends FinishApplicationMasterRequest { @@ -68,7 +70,7 @@ public class FinishApplicationMasterRequestPBImpl extends FinishApplicationMaste @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java index 70f4f96dd58..ff57eb42d77 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/FinishApplicationMasterResponsePBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterResponse; import org.apache.hadoop.yarn.proto.YarnServiceProtos.FinishApplicationMasterResponseProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class FinishApplicationMasterResponsePBImpl extends FinishApplicationMasterResponse { @@ -63,6 +65,6 @@ public class FinishApplicationMasterResponsePBImpl extends FinishApplicationMast @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java index e1e3308293f..47f43180869 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; import 
org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetApplicationReportRequestPBImpl extends GetApplicationReportRequest { @@ -71,7 +73,7 @@ public class GetApplicationReportRequestPBImpl extends GetApplicationReportReque @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java index ef61dfae54c..8a54898278a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationReportResponsePBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetApplicationReportResponsePBImpl extends GetApplicationReportResponse { @@ -71,7 +73,7 @@ public class GetApplicationReportResponsePBImpl extends GetApplicationReportResp @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java index dda5e2137d7..48a8d85ab8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { @@ -123,6 +125,6 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsResponsePBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsResponsePBImpl.java index b0897c67820..453fc894a1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsResponsePBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetApplicationsResponsePBImpl @@ -90,7 +92,7 @@ extends GetApplicationsResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java index 9dcad99269e..2288da84a34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsRequestPBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetClusterMetricsRequestPBImpl extends GetClusterMetricsRequest { @@ -63,6 +65,6 @@ public class GetClusterMetricsRequestPBImpl extends GetClusterMetricsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java index 635307aad6b..7502753a00f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterMetricsResponsePBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetClusterMetricsResponsePBImpl extends GetClusterMetricsResponse { @@ 
-71,7 +73,7 @@ public class GetClusterMetricsResponsePBImpl extends GetClusterMetricsResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java index 4e51320bf17..09c0fc7a136 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesRequestPBImpl.java @@ -18,17 +18,20 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; -import org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceStability.Unstable; import java.util.EnumSet; import java.util.Iterator; import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProtoOrBuilder; -import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; + +import com.google.protobuf.TextFormat; @Private @Unstable @@ -152,6 +155,6 @@ public class GetClusterNodesRequestPBImpl extends GetClusterNodesRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java index 23210d46f6c..04530e53334 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetClusterNodesResponsePBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetClusterNodesResponsePBImpl extends GetClusterNodesResponse { @@ -89,7 +91,7 @@ public class GetClusterNodesResponsePBImpl extends GetClusterNodesResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } 
private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesRequestPBImpl.java index 0c305ca8862..bbc1492c4d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesRequestPBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetContainerStatusesRequestPBImpl extends @@ -75,8 +77,7 @@ public class GetContainerStatusesRequestPBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesResponsePBImpl.java index 18df2146612..fb8885be62b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetContainerStatusesResponsePBImpl.java @@ -39,6 +39,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerExceptionMapProto import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetContainerStatusesResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetContainerStatusesResponsePBImpl extends @@ -85,8 +87,7 @@ public class GetContainerStatusesResponsePBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java index 8e98c88efd9..435b807d3b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequest import 
org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProtoOrBuilder; import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetDelegationTokenRequestPBImpl extends GetDelegationTokenRequest { @@ -86,7 +88,7 @@ public class GetDelegationTokenRequestPBImpl extends GetDelegationTokenRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java index c51d80d1fba..93f4b5bba27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java @@ -27,6 +27,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetDelegationTokenResponsePBImpl extends GetDelegationTokenResponse { @@ -94,7 +96,7 @@ public class GetDelegationTokenResponsePBImpl extends GetDelegationTokenResponse @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java index 1fb3b70ba85..a5699f3d8af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationRequestPBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetNewApplicationRequestPBImpl extends GetNewApplicationRequest { @@ -63,6 +65,6 @@ public class GetNewApplicationRequestPBImpl extends GetNewApplicationRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java index bf1a6c283ac..eb8ca2c5968 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetNewApplicationResponsePBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetNewApplicationResponsePBImpl extends GetNewApplicationResponse { @@ -74,7 +76,7 @@ public class GetNewApplicationResponsePBImpl extends GetNewApplicationResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java index 664ea23157d..c79e0c40e0c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoRequestPBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetQueueInfoRequestPBImpl extends GetQueueInfoRequest { @@ -124,6 +126,6 @@ public class GetQueueInfoRequestPBImpl extends GetQueueInfoRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java index 7d60ae16a89..7193d92fe3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueInfoResponsePBImpl.java @@ -27,6 +27,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetQueueInfoResponsePBImpl extends 
GetQueueInfoResponse { @@ -71,7 +73,7 @@ public class GetQueueInfoResponsePBImpl extends GetQueueInfoResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java index d964e85f064..e11ba5d10fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueUserAclsInfoRequest; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetQueueUserAclsInfoRequestPBImpl extends GetQueueUserAclsInfoRequest { @@ -64,6 +66,6 @@ public class GetQueueUserAclsInfoRequestPBImpl extends GetQueueUserAclsInfoReque @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java index d9fdadaf1c6..8d405d9726a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetQueueUserAclsInfoResponsePBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class GetQueueUserAclsInfoResponsePBImpl extends GetQueueUserAclsInfoResponse { @@ -90,7 +92,7 @@ public class GetQueueUserAclsInfoResponsePBImpl extends GetQueueUserAclsInfoResp @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java index 2b13a72085b..db973676828 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class KillApplicationRequestPBImpl extends KillApplicationRequest { @@ -71,7 +73,7 @@ public class KillApplicationRequestPBImpl extends KillApplicationRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java index f16d4177ec3..14e0c1f74af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/KillApplicationResponsePBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationResponse; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class KillApplicationResponsePBImpl extends KillApplicationResponse { @@ -63,6 +65,6 @@ public class KillApplicationResponsePBImpl extends KillApplicationResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java index a2d2024d381..037dfd98760 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterRequestPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterReque import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RegisterApplicationMasterRequestPBImpl extends RegisterApplicationMasterRequest { @@ -65,7 +67,7 @@ public class RegisterApplicationMasterRequestPBImpl extends 
RegisterApplicationM @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java index 75ce2009f33..486304c7fb1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RegisterApplicationMasterResponsePBImpl.java @@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterR import org.apache.hadoop.yarn.proto.YarnServiceProtos.RegisterApplicationMasterResponseProtoOrBuilder; import com.google.protobuf.ByteString; +import com.google.protobuf.TextFormat; @Private @@ -85,7 +86,7 @@ public class RegisterApplicationMasterResponsePBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java index 3e109c5d275..dac3c9b9b89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.impl.pb.TokenPBImpl; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RenewDelegationTokenRequestPBImpl extends @@ -89,7 +91,7 @@ public class RenewDelegationTokenRequestPBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java index ecf0b30a172..9d20b469873 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java @@ -23,6 +23,8 @@ import 
org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRespo import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProtoOrBuilder; import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RenewDelegationTokenResponsePBImpl extends @@ -66,7 +68,7 @@ public class RenewDelegationTokenResponsePBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void maybeInitBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java index acb9d34cfed..c1cd0ebbfc2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainerRequestPBImpl.java @@ -31,6 +31,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainerRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class StartContainerRequestPBImpl extends StartContainerRequest { @@ -75,7 +77,7 @@ public class StartContainerRequestPBImpl extends StartContainerRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainersResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainersResponsePBImpl.java index 1482cd779b0..8f5c740bd2e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainersResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StartContainersResponsePBImpl.java @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponsePro import org.apache.hadoop.yarn.proto.YarnServiceProtos.StartContainersResponseProtoOrBuilder; import com.google.protobuf.ByteString; +import com.google.protobuf.TextFormat; @Private @Unstable @@ -87,8 +88,7 @@ public class StartContainersResponsePBImpl extends StartContainersResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersRequestPBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersRequestPBImpl.java index 5c758e80629..27e092b706a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersRequestPBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class StopContainersRequestPBImpl extends StopContainersRequest { @@ -73,8 +75,7 @@ public class StopContainersRequestPBImpl extends StopContainersRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersResponsePBImpl.java index 5385d0a0ab2..dd28b06443d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/StopContainersResponsePBImpl.java @@ -37,6 +37,8 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.ContainerExceptionMapProto import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.StopContainersResponseProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class StopContainersResponsePBImpl extends StopContainersResponse { @@ -80,8 +82,7 @@ public class StopContainersResponsePBImpl extends StopContainersResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ") - .replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java index ab33412514d..ad45d9adb75 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationRequestPBImpl.java @@ -28,6 +28,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProtoOrBuilder; +import com.google.protobuf.TextFormat; 
+ @Private @Unstable public class SubmitApplicationRequestPBImpl extends SubmitApplicationRequest { @@ -71,7 +73,7 @@ public class SubmitApplicationRequestPBImpl extends SubmitApplicationRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java index 8ac49f5dfb1..9e127767662 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/SubmitApplicationResponsePBImpl.java @@ -24,6 +24,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationResponse; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class SubmitApplicationResponsePBImpl extends SubmitApplicationResponse { @@ -63,6 +65,6 @@ public class SubmitApplicationResponsePBImpl extends SubmitApplicationResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/package-info.java index f9c7e5c7d28..4b29e4f740e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/package-info.java @@ -18,3 +18,4 @@ @InterfaceAudience.Private package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; import org.apache.hadoop.classification.InterfaceAudience; + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java index c68c2447799..9716f74a681 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java @@ -36,6 +36,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportPro import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class ApplicationReportPBImpl extends ApplicationReport { @@ -424,7 +426,7 @@ public class ApplicationReportPBImpl 
extends ApplicationReport { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java index eb834241ef0..ada716593e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationResourceUsageReportPBImpl.java @@ -26,6 +26,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportPro import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class ApplicationResourceUsageReportPBImpl @@ -73,7 +75,7 @@ extends ApplicationResourceUsageReport { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java index 96f280ac282..5b48141adda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationSubmissionContextPBImpl.java @@ -32,6 +32,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerLaunchContextProto; import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class ApplicationSubmissionContextPBImpl @@ -80,7 +82,7 @@ extends ApplicationSubmissionContext { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java index 02e0d3b23e3..12dcfcd9f8f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java @@ -39,6 +39,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.StringLocalResourceMapProto; import 
org.apache.hadoop.yarn.proto.YarnProtos.StringStringMapProto; import com.google.protobuf.ByteString; +import com.google.protobuf.TextFormat; @Private @Unstable @@ -89,7 +90,7 @@ extends ContainerLaunchContext { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } protected final ByteBuffer convertFromProtoFormat(ByteString byteString) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java index a52ae4c2f4a..9cb28f4d0d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerStatusPBImpl.java @@ -29,6 +29,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStatusProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class ContainerStatusPBImpl extends ContainerStatus { @@ -72,7 +74,7 @@ public class ContainerStatusPBImpl extends ContainerStatus { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java index d2caf0b4a44..16bd59740d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/LocalResourcePBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto; import org.apache.hadoop.yarn.proto.YarnProtos.URLProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class LocalResourcePBImpl extends LocalResource { @@ -72,7 +74,7 @@ public class LocalResourcePBImpl extends LocalResource { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private synchronized void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java index e1a66d76956..7a1b1b1c5fd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/NodeReportPBImpl.java @@ -29,6 +29,8 @@ import 
org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class NodeReportPBImpl extends NodeReport { @@ -234,7 +236,7 @@ public class NodeReportPBImpl extends NodeReport { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContainerPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContainerPBImpl.java index 55ead2061c3..8d46cbdaf27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContainerPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContainerPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class PreemptionContainerPBImpl extends PreemptionContainer { @@ -69,7 +71,7 @@ public class PreemptionContainerPBImpl extends PreemptionContainer { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContractPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContractPBImpl.java index 07a0af95c47..2d234f2f8c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContractPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionContractPBImpl.java @@ -33,6 +33,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContractProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class PreemptionContractPBImpl extends PreemptionContract { @@ -77,7 +79,7 @@ public class PreemptionContractPBImpl extends PreemptionContract { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionMessagePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionMessagePBImpl.java index 65dc820b90b..6cac49ae685 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionMessagePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionMessagePBImpl.java @@ -27,6 +27,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionMessageProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class PreemptionMessagePBImpl extends PreemptionMessage { @@ -71,7 +73,7 @@ public class PreemptionMessagePBImpl extends PreemptionMessage { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionResourceRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionResourceRequestPBImpl.java index 97930c3dae4..a8972b15f96 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionResourceRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/PreemptionResourceRequestPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProto; import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionResourceRequestProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceRequestProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class PreemptionResourceRequestPBImpl extends PreemptionResourceRequest { @@ -69,7 +71,7 @@ public class PreemptionResourceRequestPBImpl extends PreemptionResourceRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoBase.java index 4e8c39bc9f6..bd3cc54bb0c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ProtoBase.java @@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import com.google.protobuf.ByteString; import com.google.protobuf.Message; +import com.google.protobuf.TextFormat; @Private @Unstable @@ -51,7 +52,7 @@ public abstract class ProtoBase { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } protected final ByteBuffer convertFromProtoFormat(ByteString byteString) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java index 529bd8b848a..56a5b584324 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueInfoPBImpl.java @@ -32,6 +32,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueInfoProtoOrBuilder; import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto; +import com.google.protobuf.TextFormat; + @Private @Unstable public class QueueInfoPBImpl extends QueueInfo { @@ -175,7 +177,7 @@ public class QueueInfoPBImpl extends QueueInfo { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void initLocalApplicationsList() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java index cf484103008..4aa9b0955e6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/QueueUserACLInfoPBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueUserACLInfoProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class QueueUserACLInfoPBImpl extends QueueUserACLInfo { @@ -103,7 +105,7 @@ public class QueueUserACLInfoPBImpl extends QueueUserACLInfo { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void initLocalQueueUserAclsList() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/StrictPreemptionContractPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/StrictPreemptionContractPBImpl.java index 0d80921396b..28569b46b74 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/StrictPreemptionContractPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/StrictPreemptionContractPBImpl.java @@ -30,6 +30,8 @@ import org.apache.hadoop.yarn.proto.YarnProtos.PreemptionContainerProto; import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProto; import org.apache.hadoop.yarn.proto.YarnProtos.StrictPreemptionContractProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class StrictPreemptionContractPBImpl extends StrictPreemptionContract { @@ -74,7 +76,7 @@ public class StrictPreemptionContractPBImpl extends StrictPreemptionContract { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " 
"); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java index 14cc762e020..c5586c766d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/URLPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.proto.YarnProtos.URLProto; import org.apache.hadoop.yarn.proto.YarnProtos.URLProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class URLPBImpl extends URL { @@ -64,7 +66,7 @@ public class URLPBImpl extends URL { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void maybeInitBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java index cef6c62a46c..ce2f7483331 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/YarnClusterMetricsPBImpl.java @@ -25,6 +25,8 @@ import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnClusterMetricsProtoOrBuilder; +import com.google.protobuf.TextFormat; + @Private @Unstable public class YarnClusterMetricsPBImpl extends YarnClusterMetrics { @@ -64,7 +66,7 @@ public class YarnClusterMetricsPBImpl extends YarnClusterMetrics { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void maybeInitBuilder() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/package-info.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/package-info.java index 1f14e60d800..2571db8e8dc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/package-info.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/package-info.java @@ -18,3 +18,4 @@ @InterfaceAudience.Private package org.apache.hadoop.yarn.api.records.impl.pb; import org.apache.hadoop.classification.InterfaceAudience; + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java index 5aba8e3150a..c2af34e622e 100644 
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshAdminAclsRequestPBImpl @@ -64,6 +66,6 @@ extends RefreshAdminAclsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java index f1288c50a16..b9c8a278e3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshAdminAclsResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshAdminAclsResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshAdminAclsResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshAdminAclsResponsePBImpl extends RefreshAdminAclsResponse { @@ -63,6 +65,6 @@ public class RefreshAdminAclsResponsePBImpl extends RefreshAdminAclsResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java index 6231d9b4919..2cea95a2b12 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshNodesRequestPBImpl extends RefreshNodesRequest { @@ -63,6 +65,6 @@ public class RefreshNodesRequestPBImpl extends RefreshNodesRequest { @Override public String toString() { - return 
getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java index 4ac1f8a6729..0b4bf2749d4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshNodesResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshNodesResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshNodesResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshNodesResponsePBImpl extends RefreshNodesResponse { @@ -63,6 +65,6 @@ public class RefreshNodesResponsePBImpl extends RefreshNodesResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java index 87f881c9fba..62067277c80 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshQueuesRequestPBImpl extends RefreshQueuesRequest { @@ -63,6 +65,6 @@ public class RefreshQueuesRequestPBImpl extends RefreshQueuesRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java index 4ff71088b7f..6d50b8311b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshQueuesResponsePBImpl.java @@ -23,6 +23,8 @@ import 
org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshQueuesResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshQueuesResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshQueuesResponsePBImpl extends RefreshQueuesResponse { @@ -63,6 +65,6 @@ public class RefreshQueuesResponsePBImpl extends RefreshQueuesResponse { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java index 8add1ec5555..7a0bb2e4453 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshServiceAclsRequestPBImpl extends RefreshServiceAclsRequest { @@ -65,6 +67,6 @@ public class RefreshServiceAclsRequestPBImpl extends RefreshServiceAclsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java index f24645937d3..d3ea3a4ee09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshServiceAclsResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshServiceAclsResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshServiceAclsResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshServiceAclsResponsePBImpl extends @@ -66,6 +68,6 @@ public class RefreshServiceAclsResponsePBImpl extends @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java index a93a7b7320b..7620f48262d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshSuperUserGroupsConfigurationRequestPBImpl @@ -64,6 +66,6 @@ extends RefreshSuperUserGroupsConfigurationRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java index b899ff169a1..dca301a4fad 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshSuperUserGroupsConfigurationResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshSuperUserGroupsConfigurationResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshSuperUserGroupsConfigurationResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshSuperUserGroupsConfigurationResponsePBImpl extends RefreshSuperUserGroupsConfigurationResponse { @@ -63,6 +65,6 @@ public class RefreshSuperUserGroupsConfigurationResponsePBImpl extends RefreshSu @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java index f1a41beefbc..080f97c4cf7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsRequestPBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsRequestProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsRequest; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshUserToGroupsMappingsRequestPBImpl @@ -64,6 +66,6 @@ extends RefreshUserToGroupsMappingsRequest { @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java index 52a8b99f1f0..972ebfaab99 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RefreshUserToGroupsMappingsResponsePBImpl.java @@ -23,6 +23,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.proto.YarnServerResourceManagerServiceProtos.RefreshUserToGroupsMappingsResponseProto; import org.apache.hadoop.yarn.server.api.protocolrecords.RefreshUserToGroupsMappingsResponse; +import com.google.protobuf.TextFormat; + @Private @Unstable public class RefreshUserToGroupsMappingsResponsePBImpl extends RefreshUserToGroupsMappingsResponse { @@ -63,6 +65,6 @@ public class RefreshUserToGroupsMappingsResponsePBImpl extends RefreshUserToGrou @Override public String toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java index 52e378c89b9..75aa3d1cfd0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/records/impl/pb/NodeHealthStatusPBImpl.java @@ -22,6 +22,8 @@ import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProto import org.apache.hadoop.yarn.proto.YarnServerCommonProtos.NodeHealthStatusProtoOrBuilder; import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus; +import com.google.protobuf.TextFormat; + public class NodeHealthStatusPBImpl extends NodeHealthStatus { private NodeHealthStatusProto.Builder builder; @@ -62,7 +64,7 @@ public class NodeHealthStatusPBImpl extends NodeHealthStatus { @Override public String 
toString() { - return getProto().toString().replaceAll("\\n", ", ").replaceAll("\\s+", " "); + return TextFormat.shortDebugString(getProto()); } private void mergeLocalToProto() { From 26c5a490e5f39377749aff90a22efab626c081df Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 15 Aug 2013 16:51:07 +0000 Subject: [PATCH 009/153] HDFS-5093. TestGlobPaths should re-use the MiniDFSCluster to avoid failure on Windows. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514366 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/fs/TestGlobPaths.java | 218 +++++++++--------- 2 files changed, 116 insertions(+), 105 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 17554e93ed8..effb3a62a4f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -330,6 +330,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-4632. globStatus using backslash for escaping does not work on Windows. (Chuan Liu via cnauroth) + HDFS-5093. TestGlobPaths should re-use the MiniDFSCluster to avoid failure + on Windows. (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index 4f5474d2b64..b712be10f0f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -20,7 +20,6 @@ package org.apache.hadoop.fs; import static org.junit.Assert.*; import java.io.IOException; -import java.util.Arrays; import java.util.regex.Pattern; import org.apache.commons.lang.StringUtils; @@ -30,8 +29,6 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.junit.*; -import com.google.common.base.Joiner; - public class TestGlobPaths { static class RegexPathFilter implements PathFilter { @@ -50,6 +47,7 @@ public class TestGlobPaths { static private MiniDFSCluster dfsCluster; static private FileSystem fs; + static private FileContext fc; static final private int NUM_OF_PATHS = 4; static private String USER_DIR; private Path[] path = new Path[NUM_OF_PATHS]; @@ -59,6 +57,7 @@ public class TestGlobPaths { Configuration conf = new HdfsConfiguration(); dfsCluster = new MiniDFSCluster.Builder(conf).build(); fs = FileSystem.get(conf); + fc = FileContext.getFileContext(conf); USER_DIR = fs.getHomeDirectory().toUri().getPath().toString(); } @@ -803,28 +802,24 @@ public class TestGlobPaths { /** * Run a glob test on FileSystem. */ - private static void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception { - Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + private void testOnFileSystem(FSTestWrapperGlobTest test) throws Exception { try { - FileSystem fs = FileSystem.get(conf); + fc.mkdir(new Path(USER_DIR), FsPermission.getDefault(), true); test.run(new FileSystemTestWrapper(fs), fs, null); } finally { - cluster.shutdown(); + fc.delete(new Path(USER_DIR), true); } } /** * Run a glob test on FileContext. 
*/ - private static void testOnFileContext(FSTestWrapperGlobTest test) throws Exception { - Configuration conf = new HdfsConfiguration(); - MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build(); + private void testOnFileContext(FSTestWrapperGlobTest test) throws Exception { try { - FileContext fc = FileContext.getFileContext(conf); + fs.mkdirs(new Path(USER_DIR)); test.run(new FileContextTestWrapper(fc), null, fc); } finally { - cluster.shutdown(); + cleanupDFS(); } } @@ -857,32 +852,33 @@ public class TestGlobPaths { throws Exception { // Test that globbing through a symlink to a directory yields a path // containing that symlink. - wrap.mkdir(new Path("/alpha"), - FsPermission.getDirDefault(), false); - wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false); - wrap.mkdir(new Path("/alphaLink/beta"), + wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), + false); + wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR + + "/alphaLink"), false); + wrap.mkdir(new Path(USER_DIR + "/alphaLink/beta"), FsPermission.getDirDefault(), false); // Test simple glob - FileStatus[] statuses = - wrap.globStatus(new Path("/alpha/*"), new AcceptAllPathFilter()); - Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alpha/beta", - statuses[0].getPath().toUri().getPath()); - // Test glob through symlink - statuses = - wrap.globStatus(new Path("/alphaLink/*"), new AcceptAllPathFilter()); - Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alphaLink/beta", - statuses[0].getPath().toUri().getPath()); - // If the terminal path component in a globbed path is a symlink, - // we don't dereference that link. - wrap.createSymlink(new Path("beta"), new Path("/alphaLink/betaLink"), - false); - statuses = wrap.globStatus(new Path("/alpha/betaLi*"), + FileStatus[] statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/*"), new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alpha/betaLink", - statuses[0].getPath().toUri().getPath()); + Assert.assertEquals(USER_DIR + "/alpha/beta", statuses[0].getPath() + .toUri().getPath()); + // Test glob through symlink + statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLink/*"), + new AcceptAllPathFilter()); + Assert.assertEquals(1, statuses.length); + Assert.assertEquals(USER_DIR + "/alphaLink/beta", statuses[0].getPath() + .toUri().getPath()); + // If the terminal path component in a globbed path is a symlink, + // we don't dereference that link. + wrap.createSymlink(new Path("beta"), new Path(USER_DIR + + "/alphaLink/betaLink"), false); + statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/betaLi*"), + new AcceptAllPathFilter()); + Assert.assertEquals(1, statuses.length); + Assert.assertEquals(USER_DIR + "/alpha/betaLink", statuses[0].getPath() + .toUri().getPath()); // todo: test symlink-to-symlink-to-dir, etc. } } @@ -902,58 +898,64 @@ public class TestGlobPaths { * * Also test globbing dangling symlinks. It should NOT throw any exceptions! 
*/ - private static class TestGlobWithSymlinksToSymlinks - implements FSTestWrapperGlobTest { + private static class TestGlobWithSymlinksToSymlinks implements + FSTestWrapperGlobTest { public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) throws Exception { // Test that globbing through a symlink to a symlink to a directory // fully resolves - wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false); - wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false); - wrap.createSymlink(new Path("/alphaLink"), - new Path("/alphaLinkLink"), false); - wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false); + wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), + false); + wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR + + "/alphaLink"), false); + wrap.createSymlink(new Path(USER_DIR + "/alphaLink"), new Path(USER_DIR + + "/alphaLinkLink"), false); + wrap.mkdir(new Path(USER_DIR + "/alpha/beta"), + FsPermission.getDirDefault(), false); // Test glob through symlink to a symlink to a directory - FileStatus statuses[] = - wrap.globStatus(new Path("/alphaLinkLink"), new AcceptAllPathFilter()); + FileStatus statuses[] = wrap.globStatus(new Path(USER_DIR + + "/alphaLinkLink"), new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alphaLinkLink", - statuses[0].getPath().toUri().getPath()); - statuses = - wrap.globStatus(new Path("/alphaLinkLink/*"), new AcceptAllPathFilter()); + Assert.assertEquals(USER_DIR + "/alphaLinkLink", statuses[0].getPath() + .toUri().getPath()); + statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkLink/*"), + new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alphaLinkLink/beta", - statuses[0].getPath().toUri().getPath()); + Assert.assertEquals(USER_DIR + "/alphaLinkLink/beta", statuses[0] + .getPath().toUri().getPath()); // Test glob of dangling symlink (theta does not actually exist) - wrap.createSymlink(new Path("theta"), new Path("/alpha/kappa"), false); - statuses = wrap.globStatus(new Path("/alpha/kappa/kappa"), - new AcceptAllPathFilter()); + wrap.createSymlink(new Path(USER_DIR + "theta"), new Path(USER_DIR + + "/alpha/kappa"), false); + statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/kappa/kappa"), + new AcceptAllPathFilter()); Assert.assertNull(statuses); // Test glob of symlinks - wrap.createFile("/alpha/beta/gamma"); - wrap.createSymlink(new Path("gamma"), - new Path("/alpha/beta/gammaLink"), false); - wrap.createSymlink(new Path("gammaLink"), - new Path("/alpha/beta/gammaLinkLink"), false); - wrap.createSymlink(new Path("gammaLinkLink"), - new Path("/alpha/beta/gammaLinkLinkLink"), false); - statuses = wrap.globStatus(new Path("/alpha/*/gammaLinkLinkLink"), - new AcceptAllPathFilter()); + wrap.createFile(USER_DIR + "/alpha/beta/gamma"); + wrap.createSymlink(new Path(USER_DIR + "gamma"), new Path(USER_DIR + + "/alpha/beta/gammaLink"), false); + wrap.createSymlink(new Path(USER_DIR + "gammaLink"), new Path(USER_DIR + + "/alpha/beta/gammaLinkLink"), false); + wrap.createSymlink(new Path(USER_DIR + "gammaLinkLink"), new Path( + USER_DIR + "/alpha/beta/gammaLinkLinkLink"), false); + statuses = wrap.globStatus(new Path(USER_DIR + + "/alpha/*/gammaLinkLinkLink"), new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alpha/beta/gammaLinkLinkLink", + Assert.assertEquals(USER_DIR + "/alpha/beta/gammaLinkLinkLink", 
statuses[0].getPath().toUri().getPath()); - statuses = wrap.globStatus(new Path("/alpha/beta/*"), - new AcceptAllPathFilter()); - Assert.assertEquals("/alpha/beta/gamma;/alpha/beta/gammaLink;" + - "/alpha/beta/gammaLinkLink;/alpha/beta/gammaLinkLinkLink", + statuses = wrap.globStatus(new Path(USER_DIR + "/alpha/beta/*"), + new AcceptAllPathFilter()); + Assert.assertEquals(USER_DIR + "/alpha/beta/gamma;" + USER_DIR + + "/alpha/beta/gammaLink;" + USER_DIR + "/alpha/beta/gammaLinkLink;" + + USER_DIR + "/alpha/beta/gammaLinkLinkLink", TestPath.mergeStatuses(statuses)); // Let's create two symlinks that point to each other, and glob on them. - wrap.createSymlink(new Path("tweedledee"), - new Path("/tweedledum"), false); - wrap.createSymlink(new Path("tweedledum"), - new Path("/tweedledee"), false); - statuses = wrap.globStatus(new Path("/tweedledee/unobtainium"), - new AcceptAllPathFilter()); + wrap.createSymlink(new Path(USER_DIR + "tweedledee"), new Path(USER_DIR + + "/tweedledum"), false); + wrap.createSymlink(new Path(USER_DIR + "tweedledum"), new Path(USER_DIR + + "/tweedledee"), false); + statuses = wrap.globStatus( + new Path(USER_DIR + "/tweedledee/unobtainium"), + new AcceptAllPathFilter()); Assert.assertNull(statuses); } } @@ -971,34 +973,39 @@ public class TestGlobPaths { /** * Test globbing symlinks with a custom PathFilter */ - private static class TestGlobSymlinksWithCustomPathFilter - implements FSTestWrapperGlobTest { + private static class TestGlobSymlinksWithCustomPathFilter implements + FSTestWrapperGlobTest { public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) throws Exception { // Test that globbing through a symlink to a symlink to a directory // fully resolves - wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false); - wrap.createSymlink(new Path("/alpha"), new Path("/alphaLinkz"), false); - wrap.mkdir(new Path("/alpha/beta"), FsPermission.getDirDefault(), false); - wrap.mkdir(new Path("/alpha/betaz"), FsPermission.getDirDefault(), false); - // Test glob through symlink to a symlink to a directory, with a PathFilter - FileStatus statuses[] = - wrap.globStatus(new Path("/alpha/beta"), new AcceptPathsEndingInZ()); + wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), + false); + wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR + + "/alphaLinkz"), false); + wrap.mkdir(new Path(USER_DIR + "/alpha/beta"), + FsPermission.getDirDefault(), false); + wrap.mkdir(new Path(USER_DIR + "/alpha/betaz"), + FsPermission.getDirDefault(), false); + // Test glob through symlink to a symlink to a directory, with a + // PathFilter + FileStatus statuses[] = wrap.globStatus( + new Path(USER_DIR + "/alpha/beta"), new AcceptPathsEndingInZ()); Assert.assertNull(statuses); - statuses = - wrap.globStatus(new Path("/alphaLinkz/betaz"), new AcceptPathsEndingInZ()); + statuses = wrap.globStatus(new Path(USER_DIR + "/alphaLinkz/betaz"), + new AcceptPathsEndingInZ()); Assert.assertEquals(1, statuses.length); - Assert.assertEquals("/alphaLinkz/betaz", - statuses[0].getPath().toUri().getPath()); - statuses = - wrap.globStatus(new Path("/*/*"), new AcceptPathsEndingInZ()); - Assert.assertEquals("/alpha/betaz;/alphaLinkz/betaz", - TestPath.mergeStatuses(statuses)); - statuses = - wrap.globStatus(new Path("/*/*"), new AcceptAllPathFilter()); - Assert.assertEquals("/alpha/beta;/alpha/betaz;" + - "/alphaLinkz/beta;/alphaLinkz/betaz", - TestPath.mergeStatuses(statuses)); + Assert.assertEquals(USER_DIR + "/alphaLinkz/betaz", statuses[0].getPath() + 
.toUri().getPath()); + statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"), + new AcceptPathsEndingInZ()); + Assert.assertEquals(USER_DIR + "/alpha/betaz;" + USER_DIR + + "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses)); + statuses = wrap.globStatus(new Path(USER_DIR + "/*/*"), + new AcceptAllPathFilter()); + Assert.assertEquals(USER_DIR + "/alpha/beta;" + USER_DIR + + "/alpha/betaz;" + USER_DIR + "/alphaLinkz/beta;" + USER_DIR + + "/alphaLinkz/betaz", TestPath.mergeStatuses(statuses)); } } @@ -1015,24 +1022,25 @@ public class TestGlobPaths { /** * Test that globStatus fills in the scheme even when it is not provided. */ - private static class TestGlobFillsInScheme - implements FSTestWrapperGlobTest { - public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) + private static class TestGlobFillsInScheme implements FSTestWrapperGlobTest { + public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) throws Exception { // Verify that the default scheme is hdfs, when we don't supply one. - wrap.mkdir(new Path("/alpha"), FsPermission.getDirDefault(), false); - wrap.createSymlink(new Path("/alpha"), new Path("/alphaLink"), false); - FileStatus statuses[] = - wrap.globStatus(new Path("/alphaLink"), new AcceptAllPathFilter()); + wrap.mkdir(new Path(USER_DIR + "/alpha"), FsPermission.getDirDefault(), + false); + wrap.createSymlink(new Path(USER_DIR + "/alpha"), new Path(USER_DIR + + "/alphaLink"), false); + FileStatus statuses[] = wrap.globStatus( + new Path(USER_DIR + "/alphaLink"), new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); Path path = statuses[0].getPath(); - Assert.assertEquals("/alphaLink", path.toUri().getPath()); + Assert.assertEquals(USER_DIR + "/alphaLink", path.toUri().getPath()); Assert.assertEquals("hdfs", path.toUri().getScheme()); if (fc != null) { // If we're using FileContext, then we can list a file:/// URI. // Since everyone should have the root directory, we list that. - statuses = - wrap.globStatus(new Path("file:///"), new AcceptAllPathFilter()); + statuses = wrap.globStatus(new Path("file:///"), + new AcceptAllPathFilter()); Assert.assertEquals(1, statuses.length); Path filePath = statuses[0].getPath(); Assert.assertEquals("file", filePath.toUri().getScheme()); From 8172215e5601c3bb03fb5c0a0d88768142ea5087 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 15 Aug 2013 17:19:52 +0000 Subject: [PATCH 010/153] HDFS-5080. BootstrapStandby not working with QJM when the existing NN is active. Contributed by Jing Zhao. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514386 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/qjournal/client/AsyncLogger.java | 2 +- .../hdfs/qjournal/client/AsyncLoggerSet.java | 6 +- .../qjournal/client/IPCLoggerChannel.java | 5 +- .../qjournal/client/QuorumJournalManager.java | 5 +- .../qjournal/protocol/QJournalProtocol.java | 7 +- ...JournalProtocolServerSideTranslatorPB.java | 3 +- .../QJournalProtocolTranslatorPB.java | 4 +- .../hadoop/hdfs/qjournal/server/Journal.java | 31 +++- .../qjournal/server/JournalNodeRpcServer.java | 5 +- .../hdfs/server/namenode/FSEditLog.java | 16 +- .../server/namenode/FileJournalManager.java | 12 +- .../hdfs/server/namenode/JournalSet.java | 13 +- .../server/namenode/ha/BootstrapStandby.java | 2 +- .../src/main/proto/QJournalProtocol.proto | 1 + .../hdfs/server/namenode/FSImageTestUtil.java | 13 ++ .../namenode/TestFileJournalManager.java | 2 +- .../namenode/ha/TestBootstrapStandby.java | 20 +-- .../ha/TestBootstrapStandbyWithQJM.java | 170 ++++++++++++++++++ 19 files changed, 265 insertions(+), 55 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index effb3a62a4f..e720915987b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -333,6 +333,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5093. TestGlobPaths should re-use the MiniDFSCluster to avoid failure on Windows. (Chuan Liu via cnauroth) + HDFS-5080. BootstrapStandby not working with QJM when the existing NN is + active. (jing9) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java index dda1de1164b..2501e009931 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLogger.java @@ -109,7 +109,7 @@ interface AsyncLogger { * Fetch the list of edit logs available on the remote node. */ public ListenableFuture getEditLogManifest( - long fromTxnId, boolean forReading); + long fromTxnId, boolean forReading, boolean inProgressOk); /** * Prepare recovery. See the HDFS-3077 design document for details. 
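As a rough illustration (not code from the patch itself): the substance of HDFS-5080 is that a JournalNode can now report its in-progress edit segment, capped at the highest transaction id it has actually written, and the Journal.java hunk further down in this patch does exactly that by dropping the raw in-progress entry from the manifest and re-adding it with a truncated end txid. A self-contained sketch of that selection idea, using invented stand-in types (SegmentInfo, ManifestSketch.buildManifest) rather than the real RemoteEditLog/RemoteEditLogManifest classes, might look like this:

import java.util.ArrayList;
import java.util.List;

// Hypothetical stand-in for RemoteEditLog: a txid range plus an in-progress flag.
class SegmentInfo {
  final long startTxId;
  final long endTxId;
  final boolean inProgress;

  SegmentInfo(long startTxId, long endTxId, boolean inProgress) {
    this.startTxId = startTxId;
    this.endTxId = endTxId;
    this.inProgress = inProgress;
  }
}

class ManifestSketch {
  // Simplified version of the idea behind Journal#getEditLogManifest():
  // finalized segments are always reported; the open segment is reported
  // only when inProgressOk is set, and then only up to the highest txid
  // the journal has written so far.
  static List<SegmentInfo> buildManifest(List<SegmentInfo> segments,
      boolean inProgressOk, long highestWrittenTxId) {
    List<SegmentInfo> manifest = new ArrayList<>();
    for (SegmentInfo s : segments) {
      if (!s.inProgress) {
        manifest.add(s);
      } else if (inProgressOk) {
        manifest.add(new SegmentInfo(s.startTxId, highestWrittenTxId, true));
      }
      // otherwise the in-progress segment is omitted, as before this patch
    }
    return manifest;
  }
}
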
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java index 3beff863efb..74131936bde 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/AsyncLoggerSet.java @@ -262,14 +262,14 @@ class AsyncLoggerSet { return QuorumCall.create(calls); } - public QuorumCall - getEditLogManifest(long fromTxnId, boolean forReading) { + public QuorumCall getEditLogManifest( + long fromTxnId, boolean forReading, boolean inProgressOk) { Map> calls = Maps.newHashMap(); for (AsyncLogger logger : loggers) { ListenableFuture future = - logger.getEditLogManifest(fromTxnId, forReading); + logger.getEditLogManifest(fromTxnId, forReading, inProgressOk); calls.put(logger, future); } return QuorumCall.create(calls); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java index 9115804a966..4603dbd0207 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java @@ -519,12 +519,13 @@ public class IPCLoggerChannel implements AsyncLogger { @Override public ListenableFuture getEditLogManifest( - final long fromTxnId, final boolean forReading) { + final long fromTxnId, final boolean forReading, + final boolean inProgressOk) { return executor.submit(new Callable() { @Override public RemoteEditLogManifest call() throws IOException { GetEditLogManifestResponseProto ret = getProxy().getEditLogManifest( - journalId, fromTxnId, forReading); + journalId, fromTxnId, forReading, inProgressOk); // Update the http port, since we need this to build URLs to any of the // returned logs. 
httpPort = ret.getHttpPort(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java index 385200176e8..4f1b96b6f42 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/QuorumJournalManager.java @@ -456,7 +456,7 @@ public class QuorumJournalManager implements JournalManager { long fromTxnId, boolean inProgressOk, boolean forReading) throws IOException { QuorumCall q = - loggers.getEditLogManifest(fromTxnId, forReading); + loggers.getEditLogManifest(fromTxnId, forReading, inProgressOk); Map resps = loggers.waitForWriteQuorum(q, selectInputStreamsTimeoutMs, "selectInputStreams"); @@ -480,8 +480,7 @@ public class QuorumJournalManager implements JournalManager { allStreams.add(elis); } } - JournalSet.chainAndMakeRedundantStreams( - streams, allStreams, fromTxnId, inProgressOk); + JournalSet.chainAndMakeRedundantStreams(streams, allStreams, fromTxnId); } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java index 63d7a755170..15ee76c6e9b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocol/QJournalProtocol.java @@ -125,10 +125,13 @@ public interface QJournalProtocol { * @param sinceTxId the first transaction which the client cares about * @param forReading whether or not the caller intends to read from the edit * logs + * @param inProgressOk whether or not to check the in-progress edit log + * segment * @return a list of edit log segments since the given transaction ID. */ - public GetEditLogManifestResponseProto getEditLogManifest( - String jid, long sinceTxId, boolean forReading) throws IOException; + public GetEditLogManifestResponseProto getEditLogManifest(String jid, + long sinceTxId, boolean forReading, boolean inProgressOk) + throws IOException; /** * Begin the recovery process for a given segment. 
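The two booleans that now accompany every manifest request are not independent: a manifest fetched for reading must never include the still-open segment, and later hunks in this patch both normalize the flag in Journal.java (inProgressOk = forReading ? false : inProgressOk) and guard the combination in FileJournalManager with a Preconditions.checkArgument. A tiny self-contained sketch of that guard, using a hypothetical ManifestFlags holder instead of the real Hadoop/Guava types, might look like:

class ManifestFlags {
  final boolean forReading;
  final boolean inProgressOk;

  ManifestFlags(boolean forReading, boolean inProgressOk) {
    // Mirrors the checkArgument(!(forReading && inProgressOk)) added to
    // FileJournalManager: readers never see the in-progress segment.
    if (forReading && inProgressOk) {
      throw new IllegalArgumentException(
          "in-progress segments cannot be requested when reading edits");
    }
    this.forReading = forReading;
    this.inProgressOk = inProgressOk;
  }

  // How Journal#getEditLogManifest normalizes the flag before using it.
  static boolean effectiveInProgressOk(boolean forReading, boolean inProgressOk) {
    return forReading ? false : inProgressOk;
  }
}
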
See the HDFS-3077 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java index bdebb380a35..50714040268 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java @@ -203,7 +203,8 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP return impl.getEditLogManifest( request.getJid().getIdentifier(), request.getSinceTxId(), - request.getForReading()); + request.getForReading(), + request.getInProgressOk()); } catch (IOException e) { throw new ServiceException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java index 7b36ff5c025..2df7d94bc5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolTranslatorPB.java @@ -228,13 +228,15 @@ public class QJournalProtocolTranslatorPB implements ProtocolMetaInterface, @Override public GetEditLogManifestResponseProto getEditLogManifest(String jid, - long sinceTxId, boolean forReading) throws IOException { + long sinceTxId, boolean forReading, boolean inProgressOk) + throws IOException { try { return rpcProxy.getEditLogManifest(NULL_CONTROLLER, GetEditLogManifestRequestProto.newBuilder() .setJid(convertJournalId(jid)) .setSinceTxId(sinceTxId) .setForReading(forReading) + .setInProgressOk(inProgressOk) .build()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java index 4e286b539a5..b68516b6c6a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java @@ -25,10 +25,9 @@ import java.io.InputStream; import java.io.OutputStreamWriter; import java.net.URL; import java.security.PrivilegedExceptionAction; +import java.util.Iterator; import java.util.List; import java.util.concurrent.TimeUnit; -import java.util.regex.Matcher; -import java.util.regex.Pattern; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -36,8 +35,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.qjournal.protocol.JournalNotFormattedException; -import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.JournalOutOfSyncException; +import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocol; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.NewEpochResponseProto; import 
org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PersistedRecoveryPaxosData; import org.apache.hadoop.hdfs.qjournal.protocol.QJournalProtocolProtos.PrepareRecoveryResponseProto; @@ -50,6 +49,7 @@ import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile; import org.apache.hadoop.hdfs.server.namenode.JournalManager; import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; import org.apache.hadoop.hdfs.util.AtomicFileOutputStream; import org.apache.hadoop.hdfs.util.BestEffortLongFile; @@ -630,14 +630,31 @@ class Journal implements Closeable { * @see QJournalProtocol#getEditLogManifest(String, long) */ public RemoteEditLogManifest getEditLogManifest(long sinceTxId, - boolean forReading) throws IOException { + boolean forReading, boolean inProgressOk) throws IOException { // No need to checkRequest() here - anyone may ask for the list // of segments. checkFormatted(); - RemoteEditLogManifest manifest = new RemoteEditLogManifest( - fjm.getRemoteEditLogs(sinceTxId, forReading)); - return manifest; + // if this is for reading, ignore the in-progress editlog segment + inProgressOk = forReading ? false : inProgressOk; + List logs = fjm.getRemoteEditLogs(sinceTxId, forReading, + inProgressOk); + + if (inProgressOk) { + RemoteEditLog log = null; + for (Iterator iter = logs.iterator(); iter.hasNext();) { + log = iter.next(); + if (log.isInProgress()) { + iter.remove(); + break; + } + } + if (log != null && log.isInProgress()) { + logs.add(new RemoteEditLog(log.getStartTxId(), getHighestWrittenTxId())); + } + } + + return new RemoteEditLogManifest(logs); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java index d00ba2d145f..79bd333ad34 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeRpcServer.java @@ -175,10 +175,11 @@ class JournalNodeRpcServer implements QJournalProtocol { @Override public GetEditLogManifestResponseProto getEditLogManifest(String jid, - long sinceTxId, boolean forReading) throws IOException { + long sinceTxId, boolean forReading, boolean inProgressOk) + throws IOException { RemoteEditLogManifest manifest = jn.getOrCreateJournal(jid) - .getEditLogManifest(sinceTxId, forReading); + .getEditLogManifest(sinceTxId, forReading, inProgressOk); return GetEditLogManifestResponseProto.newBuilder() .setManifest(PBHelper.convert(manifest)) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java index df90a8fc10f..60ffe7ac172 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java @@ -1274,6 +1274,7 @@ public class FSEditLog implements LogsPurgeable { } } + @Override public void selectInputStreams(Collection streams, long fromTxId, boolean inProgressOk, boolean forReading) { 
journalSet.selectInputStreams(streams, fromTxId, inProgressOk, forReading); @@ -1284,18 +1285,27 @@ public class FSEditLog implements LogsPurgeable { return selectInputStreams(fromTxId, toAtLeastTxId, null, true); } + /** Select a list of input streams to load */ + public Collection selectInputStreams( + long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery, + boolean inProgressOk) throws IOException { + return selectInputStreams(fromTxId, toAtLeastTxId, recovery, inProgressOk, + true); + } + /** - * Select a list of input streams to load. + * Select a list of input streams. * * @param fromTxId first transaction in the selected streams * @param toAtLeast the selected streams must contain this transaction * @param inProgessOk set to true if in-progress streams are OK + * @param forReading whether or not to use the streams to load the edit log */ public synchronized Collection selectInputStreams( long fromTxId, long toAtLeastTxId, MetaRecoveryContext recovery, - boolean inProgressOk) throws IOException { + boolean inProgressOk, boolean forReading) throws IOException { List streams = new ArrayList(); - selectInputStreams(streams, fromTxId, inProgressOk, true); + selectInputStreams(streams, fromTxId, inProgressOk, forReading); try { checkForGaps(streams, fromTxId, toAtLeastTxId, inProgressOk); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java index f745693ceb9..77aca197ab6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java @@ -169,18 +169,26 @@ public class FileJournalManager implements JournalManager { * @param fromTxId the txnid which to start looking * @param forReading whether or not the caller intends to read from the edit * logs + * @param inProgressOk whether or not to include the in-progress edit log + * segment * @return a list of remote edit logs * @throws IOException if edit logs cannot be listed. */ public List getRemoteEditLogs(long firstTxId, - boolean forReading) throws IOException { + boolean forReading, boolean inProgressOk) throws IOException { + // make sure not reading in-progress edit log, i.e., if forReading is true, + // we should ignore the in-progress edit log. 
+ Preconditions.checkArgument(!(forReading && inProgressOk)); + File currentDir = sd.getCurrentDir(); List allLogFiles = matchEditLogs(currentDir); List ret = Lists.newArrayListWithCapacity( allLogFiles.size()); for (EditLogFile elf : allLogFiles) { - if (elf.hasCorruptHeader() || elf.isInProgress()) continue; + if (elf.hasCorruptHeader() || (!inProgressOk && elf.isInProgress())) { + continue; + } if (elf.getFirstTxId() >= firstTxId) { ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId)); } else if (elf.getFirstTxId() < firstTxId && firstTxId <= elf.getLastTxId()) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java index 396524dbaf1..1d43cb73527 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import static org.apache.hadoop.util.ExitUtil.terminate; + import java.io.IOException; import java.util.ArrayList; import java.util.Collection; @@ -31,14 +33,10 @@ import java.util.concurrent.CopyOnWriteArrayList; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; - -import static org.apache.hadoop.util.ExitUtil.terminate; - import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog; import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest; -import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.collect.ComparisonChain; import com.google.common.collect.ImmutableList; @@ -257,13 +255,12 @@ public class JournalSet implements JournalManager { ". Skipping.", ioe); } } - chainAndMakeRedundantStreams(streams, allStreams, fromTxId, inProgressOk); + chainAndMakeRedundantStreams(streams, allStreams, fromTxId); } public static void chainAndMakeRedundantStreams( Collection outStreams, - PriorityQueue allStreams, - long fromTxId, boolean inProgressOk) { + PriorityQueue allStreams, long fromTxId) { // We want to group together all the streams that start on the same start // transaction ID. To do this, we maintain an accumulator (acc) of all // the streams we've seen at a given start transaction ID. 
When we see a @@ -598,7 +595,7 @@ public class JournalSet implements JournalManager { if (j.getManager() instanceof FileJournalManager) { FileJournalManager fjm = (FileJournalManager)j.getManager(); try { - allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading)); + allLogs.addAll(fjm.getRemoteEditLogs(fromTxId, forReading, false)); } catch (Throwable t) { LOG.warn("Cannot list edit logs in " + fjm, t); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java index e9549ce8f18..41325257e35 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java @@ -226,7 +226,7 @@ public class BootstrapStandby implements Tool, Configurable { try { Collection streams = image.getEditLog().selectInputStreams( - firstTxIdInLogs, curTxIdOnOtherNode, null, true); + firstTxIdInLogs, curTxIdOnOtherNode, null, true, false); for (EditLogInputStream stream : streams) { IOUtils.closeStream(stream); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto index 16c0277b9be..a9e8017e96f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/QJournalProtocol.proto @@ -177,6 +177,7 @@ message GetEditLogManifestRequestProto { required uint64 sinceTxId = 2; // Transaction ID // Whether or not the client will be reading from the returned streams. optional bool forReading = 3 [default = true]; + optional bool inProgressOk = 4 [default = false]; } message GetEditLogManifestResponseProto { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java index 6fc8d6e4784..7c2c7e2f98c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/FSImageTestUtil.java @@ -62,6 +62,7 @@ import org.mockito.Mockito; import com.google.common.base.Joiner; import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableSet; import com.google.common.collect.Lists; import com.google.common.collect.Maps; import com.google.common.collect.Sets; @@ -555,4 +556,16 @@ public abstract class FSImageTestUtil { public static long getNSQuota(FSNamesystem ns) { return ns.dir.rootDir.getNsQuota(); } + + public static void assertNNFilesMatch(MiniDFSCluster cluster) throws Exception { + List curDirs = Lists.newArrayList(); + curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0)); + curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1)); + + // Ignore seen_txid file, since the newly bootstrapped standby + // will have a higher seen_txid than the one it bootstrapped from. 
+ Set ignoredFiles = ImmutableSet.of("seen_txid"); + FSImageTestUtil.assertParallelFilesAreIdentical(curDirs, + ignoredFiles); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java index e3fd99a4b71..44d1058806f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java @@ -479,6 +479,6 @@ public class TestFileJournalManager { private static String getLogsAsString( FileJournalManager fjm, long firstTxId) throws IOException { - return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId, true)); + return Joiner.on(",").join(fjm.getRemoteEditLogs(firstTxId, true, false)); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index d38fdd7982b..678e03866d5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -24,8 +24,6 @@ import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; import java.net.URI; -import java.util.List; -import java.util.Set; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -45,8 +43,6 @@ import org.junit.Before; import org.junit.Test; import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; public class TestBootstrapStandby { private static final Log LOG = LogFactory.getLog(TestBootstrapStandby.class); @@ -107,7 +103,7 @@ public class TestBootstrapStandby { // Should have copied over the namespace from the active FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of(0)); - assertNNFilesMatch(); + FSImageTestUtil.assertNNFilesMatch(cluster); // We should now be able to start the standby successfully. cluster.restartNameNode(1); @@ -138,7 +134,7 @@ public class TestBootstrapStandby { // Should have copied over the namespace from the active FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, ImmutableList.of((int)expectedCheckpointTxId)); - assertNNFilesMatch(); + FSImageTestUtil.assertNNFilesMatch(cluster); // We should now be able to start the standby successfully. cluster.restartNameNode(1); @@ -208,18 +204,6 @@ public class TestBootstrapStandby { cluster.getConfiguration(1)); assertEquals(0, rc); } - - private void assertNNFilesMatch() throws Exception { - List curDirs = Lists.newArrayList(); - curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 0)); - curDirs.addAll(FSImageTestUtil.getNameNodeCurrentDirs(cluster, 1)); - - // Ignore seen_txid file, since the newly bootstrapped standby - // will have a higher seen_txid than the one it bootstrapped from. 
- Set ignoredFiles = ImmutableSet.of("seen_txid"); - FSImageTestUtil.assertParallelFilesAreIdentical(curDirs, - ignoredFiles); - } private void removeStandbyNameDirs() { for (URI u : cluster.getNameDirs(1)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java new file mode 100644 index 00000000000..e618c9a5ed0 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandbyWithQJM.java @@ -0,0 +1,170 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode.ha; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_NAMENODES_KEY_PREFIX; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import java.net.URI; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.DFSUtil; +import org.apache.hadoop.hdfs.DistributedFileSystem; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.MiniDFSNNTopology; +import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; +import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.ImmutableList; + +/** + * Test BootstrapStandby when QJM is used for shared edits. 
+ */ +public class TestBootstrapStandbyWithQJM { + + private static final String NAMESERVICE = "ns1"; + private static final String NN1 = "nn1"; + private static final String NN2 = "nn2"; + private static final int NUM_JN = 3; + private static final int NN1_IPC_PORT = 10000; + private static final int NN1_INFO_PORT = 10001; + private static final int NN2_IPC_PORT = 10002; + private static final int NN2_INFO_PORT = 10003; + + private MiniDFSCluster cluster; + private MiniJournalCluster jCluster; + + @Before + public void setup() throws Exception { + // start 3 journal nodes + jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true) + .numJournalNodes(NUM_JN).build(); + URI journalURI = jCluster.getQuorumJournalURI(NAMESERVICE); + + // start cluster with 2 NameNodes + MiniDFSNNTopology topology = new MiniDFSNNTopology() + .addNameservice(new MiniDFSNNTopology.NSConf(NAMESERVICE).addNN( + new MiniDFSNNTopology.NNConf("nn1").setIpcPort(NN1_IPC_PORT) + .setHttpPort(NN1_INFO_PORT)).addNN( + new MiniDFSNNTopology.NNConf("nn2").setIpcPort(NN2_IPC_PORT) + .setHttpPort(NN2_INFO_PORT))); + + Configuration conf = initHAConf(journalURI); + cluster = new MiniDFSCluster.Builder(conf).nnTopology(topology) + .numDataNodes(1).manageNameDfsSharedDirs(false).build(); + cluster.waitActive(); + + Configuration confNN0 = new Configuration(conf); + cluster.shutdown(); + // initialize the journal nodes + confNN0.set(DFSConfigKeys.DFS_HA_NAMENODE_ID_KEY, "nn1"); + NameNode.initializeSharedEdits(confNN0, true); + + // restart the cluster + cluster = new MiniDFSCluster.Builder(conf).format(false) + .nnTopology(topology).numDataNodes(1).manageNameDfsSharedDirs(false) + .build(); + cluster.waitActive(); + + // make nn0 active + cluster.transitionToActive(0); + // do sth to generate in-progress edit log data + DistributedFileSystem dfs = (DistributedFileSystem) + HATestUtil.configureFailoverFs(cluster, conf); + dfs.mkdirs(new Path("/test2")); + dfs.close(); + } + + @After + public void cleanup() throws IOException { + if (cluster != null) { + cluster.shutdown(); + } + if (jCluster != null) { + jCluster.shutdown(); + } + } + + private Configuration initHAConf(URI journalURI) { + Configuration conf = new Configuration(); + conf.set(DFSConfigKeys.DFS_NAMENODE_SHARED_EDITS_DIR_KEY, + journalURI.toString()); + + String address1 = "127.0.0.1:" + NN1_IPC_PORT; + String address2 = "127.0.0.1:" + NN2_IPC_PORT; + conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, + NAMESERVICE, NN1), address1); + conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, + NAMESERVICE, NN2), address2); + conf.set(DFSConfigKeys.DFS_NAMESERVICES, NAMESERVICE); + conf.set(DFSUtil.addKeySuffixes(DFS_HA_NAMENODES_KEY_PREFIX, NAMESERVICE), + NN1 + "," + NN2); + conf.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." 
+ NAMESERVICE, + ConfiguredFailoverProxyProvider.class.getName()); + conf.set("fs.defaultFS", "hdfs://" + NAMESERVICE); + + return conf; + } + + /** BootstrapStandby when the existing NN is standby */ + @Test + public void testBootstrapStandbyWithStandbyNN() throws Exception { + // make the first NN in standby state + cluster.transitionToStandby(0); + Configuration confNN1 = cluster.getConfiguration(1); + + // shut down nn1 + cluster.shutdownNameNode(1); + + int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1); + assertEquals(0, rc); + + // Should have copied over the namespace from the standby + FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, + ImmutableList.of(0)); + FSImageTestUtil.assertNNFilesMatch(cluster); + } + + /** BootstrapStandby when the existing NN is active */ + @Test + public void testBootstrapStandbyWithActiveNN() throws Exception { + // make the first NN in active state + cluster.transitionToActive(0); + Configuration confNN1 = cluster.getConfiguration(1); + + // shut down nn1 + cluster.shutdownNameNode(1); + + int rc = BootstrapStandby.run(new String[] { "-force" }, confNN1); + assertEquals(0, rc); + + // Should have copied over the namespace from the standby + FSImageTestUtil.assertNNHasCheckpoints(cluster, 1, + ImmutableList.of(0)); + FSImageTestUtil.assertNNFilesMatch(cluster); + } +} From 02b19e0738d9df1e4d38280c5575e1d3ba49f8cb Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 15 Aug 2013 18:22:52 +0000 Subject: [PATCH 011/153] HDFS-5076. Add MXBean methods to query NN's transaction information and JournalNode's journal status. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514422 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/qjournal/server/JournalNode.java | 52 ++++++++- .../qjournal/server/JournalNodeMXBean.java | 36 ++++++ .../hdfs/server/namenode/FSNamesystem.java | 10 ++ .../hdfs/server/namenode/NameNodeMXBean.java | 6 + .../server/TestJournalNodeMXBean.java | 107 ++++++++++++++++++ .../server/namenode/TestNameNodeMXBean.java | 5 + 7 files changed, 218 insertions(+), 1 deletion(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index e720915987b..44308674028 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -288,6 +288,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-4763 Add script changes/utility for starting NFS gateway (brandonli) + HDFS-5076 Add MXBean methods to query NN's transaction information and + JournalNode's journal status. (jing9) + IMPROVEMENTS HDFS-4513. 
Clarify in the WebHDFS REST API that all JSON respsonses may diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java index 8291b5932eb..4ed4244ac16 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java @@ -18,8 +18,10 @@ package org.apache.hadoop.hdfs.qjournal.server; import java.io.File; +import java.io.FileFilter; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.HashMap; import java.util.Map; import org.apache.commons.logging.Log; @@ -34,11 +36,13 @@ import org.apache.hadoop.hdfs.server.common.StorageErrorReporter; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; +import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.util.DiskChecker; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; +import org.mortbay.util.ajax.JSON; import com.google.common.base.Preconditions; import com.google.common.collect.Maps; @@ -51,7 +55,7 @@ import com.google.common.collect.Maps; * in the quorum protocol. */ @InterfaceAudience.Private -public class JournalNode implements Tool, Configurable { +public class JournalNode implements Tool, Configurable, JournalNodeMXBean { public static final Log LOG = LogFactory.getLog(JournalNode.class); private Configuration conf; private JournalNodeRpcServer rpcServer; @@ -128,6 +132,8 @@ public class JournalNode implements Tool, Configurable { SecurityUtil.login(conf, DFSConfigKeys.DFS_JOURNALNODE_KEYTAB_FILE_KEY, DFSConfigKeys.DFS_JOURNALNODE_USER_NAME_KEY, socAddr.getHostName()); + registerJNMXBean(); + httpServer = new JournalNodeHttpServer(conf, this); httpServer.start(); @@ -208,6 +214,50 @@ public class JournalNode implements Tool, Configurable { return new File(new File(dir), jid); } + @Override // JournalNodeMXBean + public String getJournalsStatus() { + // jid:{Formatted:True/False} + Map> status = + new HashMap>(); + synchronized (this) { + for (Map.Entry entry : journalsById.entrySet()) { + Map jMap = new HashMap(); + jMap.put("Formatted", Boolean.toString(entry.getValue().isFormatted())); + status.put(entry.getKey(), jMap); + } + } + + // It is possible that some journals have been formatted before, while the + // corresponding journals are not in journalsById yet (because of restarting + // JN, e.g.). For simplicity, let's just assume a journal is formatted if + // there is a directory for it. We can also call analyzeStorage method for + // these directories if necessary. + // Also note that we do not need to check localDir here since + // validateAndCreateJournalDir has been called before we register the + // MXBean. 
+ File[] journalDirs = localDir.listFiles(new FileFilter() { + @Override + public boolean accept(File file) { + return file.isDirectory(); + } + }); + for (File journalDir : journalDirs) { + String jid = journalDir.getName(); + if (!status.containsKey(jid)) { + Map jMap = new HashMap(); + jMap.put("Formatted", "true"); + status.put(jid, jMap); + } + } + return JSON.toString(status); + } + + /** + * Register JournalNodeMXBean + */ + private void registerJNMXBean() { + MBeans.register("JournalNode", "JournalNodeInfo", this); + } private class ErrorReporter implements StorageErrorReporter { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java new file mode 100644 index 00000000000..4e8d9da50f9 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeMXBean.java @@ -0,0 +1,36 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.qjournal.server; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +/** + * This is the JMX management interface for JournalNode information + */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public interface JournalNodeMXBean { + + /** + * Get status information (e.g., whether formatted) of JournalNode's journals. 
+ * + * @return A string presenting status for each journal + */ + public String getJournalsStatus(); +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 989f688a0fd..2f230d73509 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6364,6 +6364,16 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return JSON.toString(jasList); } + @Override // NameNodeMxBean + public String getJournalTransactionInfo() { + Map txnIdMap = new HashMap(); + txnIdMap.put("LastAppliedOrWrittenTxId", + Long.toString(this.getFSImage().getLastAppliedOrWrittenTxId())); + txnIdMap.put("MostRecentCheckpointTxId", + Long.toString(this.getFSImage().getMostRecentCheckpointTxId())); + return JSON.toString(txnIdMap); + } + @Override // NameNodeMXBean public String getNNStarted() { return getStartTime().toString(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java index 50315a4ae67..173d5aea4c7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java @@ -188,6 +188,12 @@ public interface NameNodeMXBean { * @return the name journal status information, as a JSON string. */ public String getNameJournalStatus(); + + /** + * Get information about the transaction ID, including the last applied + * transaction ID and the most recent checkpoint's transaction ID + */ + public String getJournalTransactionInfo(); /** * Gets the NN start time diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java new file mode 100644 index 00000000000..347184870f6 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java @@ -0,0 +1,107 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.qjournal.server; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; + +import java.io.IOException; +import java.lang.management.ManagementFactory; +import java.util.HashMap; +import java.util.Map; + +import javax.management.MBeanServer; +import javax.management.ObjectName; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.qjournal.MiniJournalCluster; +import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; +import org.junit.After; +import org.junit.Before; +import org.junit.Test; +import org.mortbay.util.ajax.JSON; + +/** + * Test {@link JournalNodeMXBean} + */ +public class TestJournalNodeMXBean { + + private static final String NAMESERVICE = "ns1"; + private static final int NUM_JN = 1; + + private MiniJournalCluster jCluster; + private JournalNode jn; + + @Before + public void setup() throws IOException { + // start 1 journal node + jCluster = new MiniJournalCluster.Builder(new Configuration()).format(true) + .numJournalNodes(NUM_JN).build(); + jn = jCluster.getJournalNode(0); + } + + @After + public void cleanup() throws IOException { + if (jCluster != null) { + jCluster.shutdown(); + } + } + + @Test + public void testJournalNodeMXBean() throws Exception { + // we have not formatted the journals yet, and the journal status in jmx + // should be empty since journal objects are created lazily + MBeanServer mbs = ManagementFactory.getPlatformMBeanServer(); + ObjectName mxbeanName = new ObjectName( + "Hadoop:service=JournalNode,name=JournalNodeInfo"); + + // getJournalsStatus + String journalStatus = (String) mbs.getAttribute(mxbeanName, + "JournalsStatus"); + assertEquals(jn.getJournalsStatus(), journalStatus); + assertFalse(journalStatus.contains(NAMESERVICE)); + + // format the journal ns1 + final NamespaceInfo FAKE_NSINFO = new NamespaceInfo(12345, "mycluster", + "my-bp", 0L); + jn.getOrCreateJournal(NAMESERVICE).format(FAKE_NSINFO); + + // check again after format + // getJournalsStatus + journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus"); + assertEquals(jn.getJournalsStatus(), journalStatus); + Map> jMap = new HashMap>(); + Map infoMap = new HashMap(); + infoMap.put("Formatted", "true"); + jMap.put(NAMESERVICE, infoMap); + assertEquals(JSON.toString(jMap), journalStatus); + + // restart journal node without formatting + jCluster = new MiniJournalCluster.Builder(new Configuration()).format(false) + .numJournalNodes(NUM_JN).build(); + jn = jCluster.getJournalNode(0); + // re-check + journalStatus = (String) mbs.getAttribute(mxbeanName, "JournalsStatus"); + assertEquals(jn.getJournalsStatus(), journalStatus); + jMap = new HashMap>(); + infoMap = new HashMap(); + infoMap.put("Formatted", "true"); + jMap.put(NAMESERVICE, infoMap); + assertEquals(JSON.toString(jMap), journalStatus); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java index 227d2cef402..8d188d7b651 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java @@ -120,6 +120,11 @@ public class TestNameNodeMXBean { String nameJournalStatus = (String) (mbs.getAttribute(mxbeanName, "NameJournalStatus")); assertEquals("Bad value for 
NameJournalStatus", fsn.getNameJournalStatus(), nameJournalStatus); + // get attribute JournalTransactionInfo + String journalTxnInfo = (String) mbs.getAttribute(mxbeanName, + "JournalTransactionInfo"); + assertEquals("Bad value for NameTxnIds", fsn.getJournalTransactionInfo(), + journalTxnInfo); // get attribute "NNStarted" String nnStarted = (String) mbs.getAttribute(mxbeanName, "NNStarted"); assertEquals("Bad value for NNStarted", fsn.getNNStarted(), nnStarted); From d62bd71d650fbe143b222235083be80efb1b63ef Mon Sep 17 00:00:00 2001 From: Siddharth Seth Date: Thu, 15 Aug 2013 18:44:52 +0000 Subject: [PATCH 012/153] Update CHANGES.txt to move YARN-1045 and MAPREDUCE-5352 to the correct version. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514432 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 6 +++--- hadoop-yarn-project/CHANGES.txt | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 066264c9ea5..933e9fcf9c2 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -186,9 +186,6 @@ Release 2.1.1-beta - UNRELEASED OPTIMIZATIONS - MAPREDUCE-5352. Optimize node local splits generated by - CombineFileInputFormat. (sseth) - MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race conditions (jlowe via kihwal) @@ -395,6 +392,9 @@ Release 2.1.0-beta - 2013-08-06 MAPREDUCE-5268. Improve history server startup performance (Karthik Kambatla via jlowe) + MAPREDUCE-5352. Optimize node local splits generated by + CombineFileInputFormat. (sseth) + BUG FIXES MAPREDUCE-4671. AM does not tell the RM about container requests which are diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 51b03ef53cc..b12b10d21ff 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -43,8 +43,6 @@ Release 2.1.1-beta - UNRELEASED YARN-589. Expose a REST API for monitoring the fair scheduler (Sandy Ryza). - YARN-1045. Improve toString implementation for PBImpls. (Jian He via sseth) - OPTIMIZATIONS BUG FIXES @@ -524,6 +522,8 @@ Release 2.1.0-beta - 2013-08-06 YARN-1046. Disable mem monitoring by default in MiniYARNCluster. (Karthik Kambatla via Sandy Ryza) + YARN-1045. Improve toString implementation for PBImpls. (Jian He via sseth) + OPTIMIZATIONS YARN-512. Log aggregation root directory check is more expensive than it From 0e47ebb32f62b9312b44ccbec97e302674cdee80 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Thu, 15 Aug 2013 19:22:09 +0000 Subject: [PATCH 013/153] HADOOP-9868. Server must not advertise kerberos realm. Contributed by Daryn Sharp. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514448 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../src/main/java/org/apache/hadoop/security/SaslRpcServer.java | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3a5736a1f53..43a89d897dd 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -394,6 +394,8 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9381. Document dfs cp -f option. (Keegan Witt, suresh via suresh) + HADOOP-9868. Server must not advertise kerberos realm. 
(daryn via kihwal) + Release 2.1.0-beta - 2013-08-06 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java index fffedc1a11e..9408028ffa2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java @@ -104,7 +104,7 @@ public class SaslRpcServer { if (LOG.isDebugEnabled()) LOG.debug("Kerberos principal name is " + fullName); // don't use KerberosName because we don't want auth_to_local - String[] parts = fullName.split("[/@]", 2); + String[] parts = fullName.split("[/@]", 3); protocol = parts[0]; // should verify service host is present here rather than in create() // but lazy tests are using a UGI that isn't a SPN... From a37d2fc89d79b73049fa7678b1b5165aa4de423a Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Thu, 15 Aug 2013 20:29:43 +0000 Subject: [PATCH 014/153] Preparing for hadoop-2.1.0-beta rc2. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514469 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +- hadoop-mapreduce-project/CHANGES.txt | 2 +- hadoop-yarn-project/CHANGES.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 43a89d897dd..9518bf278af 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -396,7 +396,7 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9868. Server must not advertise kerberos realm. (daryn via kihwal) -Release 2.1.0-beta - 2013-08-06 +Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 44308674028..fbd7e61eb66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -339,7 +339,7 @@ Release 2.1.1-beta - UNRELEASED HDFS-5080. BootstrapStandby not working with QJM when the existing NN is active. (jing9) -Release 2.1.0-beta - 2013-08-06 +Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 933e9fcf9c2..613a38a52f5 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -220,7 +220,7 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5454. TestDFSIO fails intermittently on JDK7 (Karthik Kambatla via Sandy Ryza) -Release 2.1.0-beta - 2013-08-06 +Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index b12b10d21ff..9d10e2a6efe 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -70,7 +70,7 @@ Release 2.1.1-beta - UNRELEASED YARN-337. RM handles killed application tracking URL poorly (jlowe) -Release 2.1.0-beta - 2013-08-06 +Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES From 2fc7e14e392f188958b9867a5d2dd563dfcc378a Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 15 Aug 2013 20:43:46 +0000 Subject: [PATCH 015/153] HDFS-5099. Namenode#copyEditLogSegmentsToSharedDir should close EditLogInputStreams upon finishing. Contributed by Chuan Liu. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514481 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/server/namenode/NameNode.java | 64 +++++++++++-------- 2 files changed, 39 insertions(+), 28 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index fbd7e61eb66..02beab5c609 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -339,6 +339,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5080. BootstrapStandby not working with QJM when the existing NN is active. (jing9) + HDFS-5099. Namenode#copyEditLogSegmentsToSharedDir should close + EditLogInputStreams upon finishing. (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index a933585523a..b8a51390c11 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -956,41 +956,49 @@ public class NameNode implements NameNodeStatusMXBean { FSEditLog sourceEditLog = fsns.getFSImage().editLog; long fromTxId = fsns.getFSImage().getMostRecentCheckpointTxId(); - Collection streams = sourceEditLog.selectInputStreams( - fromTxId+1, 0); - - // Set the nextTxid to the CheckpointTxId+1 - newSharedEditLog.setNextTxId(fromTxId + 1); - // Copy all edits after last CheckpointTxId to shared edits dir - for (EditLogInputStream stream : streams) { - LOG.debug("Beginning to copy stream " + stream + " to shared edits"); - FSEditLogOp op; - boolean segmentOpen = false; - while ((op = stream.readOp()) != null) { - if (LOG.isTraceEnabled()) { - LOG.trace("copying op: " + op); - } - if (!segmentOpen) { - newSharedEditLog.startLogSegment(op.txid, false); - segmentOpen = true; - } - - newSharedEditLog.logEdit(op); + Collection streams = null; + try { + streams = sourceEditLog.selectInputStreams(fromTxId + 1, 0); - if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) { + // Set the nextTxid to the CheckpointTxId+1 + newSharedEditLog.setNextTxId(fromTxId + 1); + + // Copy all edits after last CheckpointTxId to shared edits dir + for (EditLogInputStream stream : streams) { + LOG.debug("Beginning to copy stream " + stream + " to shared edits"); + FSEditLogOp op; + boolean segmentOpen = false; + while ((op = stream.readOp()) != null) { + if (LOG.isTraceEnabled()) { + LOG.trace("copying op: " + op); + } + if (!segmentOpen) { + newSharedEditLog.startLogSegment(op.txid, false); + segmentOpen = true; + } + + newSharedEditLog.logEdit(op); + + if (op.opCode == FSEditLogOpCodes.OP_END_LOG_SEGMENT) { + newSharedEditLog.logSync(); + newSharedEditLog.endCurrentLogSegment(false); + LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + + stream); + segmentOpen = false; + } + } + + if (segmentOpen) { + LOG.debug("ending log segment because of end of stream in " + stream); newSharedEditLog.logSync(); newSharedEditLog.endCurrentLogSegment(false); - LOG.debug("ending log segment because of END_LOG_SEGMENT op in " + stream); segmentOpen = false; } } - - if (segmentOpen) { - LOG.debug("ending log segment because of end of stream in " + stream); - newSharedEditLog.logSync(); - 
newSharedEditLog.endCurrentLogSegment(false); - segmentOpen = false; + } finally { + if (streams != null) { + FSEditLog.closeAllStreams(streams); } } } From 10ec8a248ecbe37e52f81b13b939174eb43eda1f Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Thu, 15 Aug 2013 21:21:10 +0000 Subject: [PATCH 016/153] HDFS-2994. If lease soft limit is recovered successfully the append can fail. Contributed by Tao Luo. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514500 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/server/namenode/FSNamesystem.java | 9 +++- .../apache/hadoop/hdfs/TestFileAppend.java | 44 +++++++++++++++++++ 3 files changed, 54 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 02beab5c609..081c080868f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -342,6 +342,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5099. Namenode#copyEditLogSegmentsToSharedDir should close EditLogInputStreams upon finishing. (Chuan Liu via cnauroth) + HDFS-2994. If lease soft limit is recovered successfully + the append can fail. (Tao Luo via shv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 2f230d73509..f15fe4fe388 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -2153,10 +2153,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats, throw new FileNotFoundException("failed to append to non-existent file " + src + " on client " + clientMachine); } - final INodeFile myFile = INodeFile.valueOf(inode, src, true); + INodeFile myFile = INodeFile.valueOf(inode, src, true); // Opening an existing file for write - may need to recover lease. recoverLeaseInternal(myFile, src, holder, clientMachine, false); - + + // recoverLeaseInternal may create a new InodeFile via + // finalizeINodeFileUnderConstruction so we need to refresh + // the referenced file. + myFile = INodeFile.valueOf(dir.getINode(src), src, true); + final DatanodeDescriptor clientNode = blockManager.getDatanodeManager().getDatanodeByHost(clientMachine); return prepareFileForWrite(src, myFile, holder, clientMachine, clientNode, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java index 54ff9036b91..e4015944692 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hdfs; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.File; @@ -337,4 +338,47 @@ public class TestFileAppend{ cluster.shutdown(); } } + + /** Tests appending after soft-limit expires. 
*/ + @Test + public void testAppendAfterSoftLimit() + throws IOException, InterruptedException { + Configuration conf = new HdfsConfiguration(); + conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1); + conf.setBoolean(DFSConfigKeys.DFS_SUPPORT_APPEND_KEY, true); + //Set small soft-limit for lease + final long softLimit = 1L; + final long hardLimit = 9999999L; + + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1) + .build(); + cluster.setLeasePeriod(softLimit, hardLimit); + cluster.waitActive(); + + FileSystem fs = cluster.getFileSystem(); + FileSystem fs2 = new DistributedFileSystem(); + fs2.initialize(fs.getUri(), conf); + + final Path testPath = new Path("/testAppendAfterSoftLimit"); + final byte[] fileContents = AppendTestUtil.initBuffer(32); + + // create a new file without closing + FSDataOutputStream out = fs.create(testPath); + out.write(fileContents); + + //Wait for > soft-limit + Thread.sleep(250); + + try { + FSDataOutputStream appendStream2 = fs2.append(testPath); + appendStream2.write(fileContents); + appendStream2.close(); + assertEquals(fileContents.length, fs.getFileStatus(testPath).getLen()); + } finally { + fs.close(); + fs2.close(); + cluster.shutdown(); + } + } + } From 8df7e7deecad2b8131d67a1916b1ec4c9f7bc633 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 15 Aug 2013 23:05:41 +0000 Subject: [PATCH 017/153] HADOOP-9865. FileContext#globStatus has a regression with respect to relative path. (Contributed by Chaun Lin) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514531 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + .../java/org/apache/hadoop/fs/Globber.java | 8 +-- .../org/apache/hadoop/fs/TestGlobPaths.java | 55 ++++++++++++++----- 3 files changed, 47 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 9518bf278af..e18d4584299 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -337,6 +337,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9875. TestDoAsEffectiveUser can fail on JDK 7. (Aaron T. Myers via Colin Patrick McCabe) + HADOOP-9865. FileContext#globStatus has a regression with respect to + relative path. 
(Chuan Lin via Colin Patrick McCabe) + Release 2.1.1-beta - UNRELEASED diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java index ad28478aeb8..378311a71a2 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java @@ -99,24 +99,24 @@ class Globber { } private String schemeFromPath(Path path) throws IOException { - String scheme = pathPattern.toUri().getScheme(); + String scheme = path.toUri().getScheme(); if (scheme == null) { if (fs != null) { scheme = fs.getUri().getScheme(); } else { - scheme = fc.getFSofPath(path).getUri().getScheme(); + scheme = fc.getDefaultFileSystem().getUri().getScheme(); } } return scheme; } private String authorityFromPath(Path path) throws IOException { - String authority = pathPattern.toUri().getAuthority(); + String authority = path.toUri().getAuthority(); if (authority == null) { if (fs != null) { authority = fs.getUri().getAuthority(); } else { - authority = fc.getFSofPath(path).getUri().getAuthority(); + authority = fc.getDefaultFileSystem().getUri().getAuthority(); } } return authority ; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java index b712be10f0f..820b00bb0b0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java @@ -622,21 +622,7 @@ public class TestGlobPaths { cleanupDFS(); } } - - @Test - public void pTestRelativePath() throws IOException { - try { - String [] files = new String[] {"a", "abc", "abc.p", "bacd"}; - Path[] matchedPath = prepareTesting("a*", files); - assertEquals(matchedPath.length, 3); - assertEquals(matchedPath[0], new Path(USER_DIR, path[0])); - assertEquals(matchedPath[1], new Path(USER_DIR, path[1])); - assertEquals(matchedPath[2], new Path(USER_DIR, path[2])); - } finally { - cleanupDFS(); - } - } - + /* Test {xx,yy} */ @Test public void pTestCurlyBracket() throws IOException { @@ -1061,4 +1047,43 @@ public class TestGlobPaths { public void testGlobFillsInSchemeOnFC() throws Exception { testOnFileContext(new TestGlobFillsInScheme()); } + + /** + * Test that globStatus works with relative paths. 
+ **/ + private static class TestRelativePath implements FSTestWrapperGlobTest { + public void run(FSTestWrapper wrap, FileSystem fs, FileContext fc) + throws Exception { + String[] files = new String[] { "a", "abc", "abc.p", "bacd" }; + + Path[] path = new Path[files.length]; + for(int i=0; i < files.length; i++) { + path[i] = wrap.makeQualified(new Path(files[i])); + wrap.mkdir(path[i], FsPermission.getDirDefault(), true); + } + + Path patternPath = new Path("a*"); + Path[] globResults = FileUtil.stat2Paths(wrap.globStatus(patternPath, + new AcceptAllPathFilter()), + patternPath); + + for(int i=0; i < globResults.length; i++) { + globResults[i] = wrap.makeQualified(globResults[i]); + } + + assertEquals(globResults.length, 3); + assertEquals(USER_DIR + "/a;" + USER_DIR + "/abc;" + USER_DIR + "/abc.p", + TestPath.mergeStatuses(globResults)); + } + } + + @Test + public void testRelativePathOnFS() throws Exception { + testOnFileSystem(new TestRelativePath()); + } + + @Test + public void testRelativePathOnFC() throws Exception { + testOnFileContext(new TestRelativePath()); + } } From 99064ec9058df09e554d379950b0e40bf900f9a2 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 16 Aug 2013 04:17:08 +0000 Subject: [PATCH 018/153] HDFS-5100. TestNamenodeRetryCache fails on Windows due to incorrect cleanup. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514573 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 081c080868f..3ea6bbb88b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -345,6 +345,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-2994. If lease soft limit is recovered successfully the append can fail. (Tao Luo via shv) + HDFS-5100. TestNamenodeRetryCache fails on Windows due to incorrect cleanup. + (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java index 03ee9fd5d1a..54dda2fe8ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNamenodeRetryCache.java @@ -96,7 +96,7 @@ public class TestNamenodeRetryCache { * @throws AccessControlException */ @After public void cleanup() throws IOException { - namesystem.delete("/", true); + cluster.shutdown(); } public static void incrementCallId() { From d9de6a928df055647fa3c6138f3b9142a4f6c0b0 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 16 Aug 2013 04:38:03 +0000 Subject: [PATCH 019/153] HDFS-5103. TestDirectoryScanner fails on Windows. Contributed by Chuan Liu. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514576 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hadoop/hdfs/server/datanode/TestDirectoryScanner.java | 6 ++---- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3ea6bbb88b4..44959ef4ffb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -348,6 +348,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5100. TestNamenodeRetryCache fails on Windows due to incorrect cleanup. (Chuan Liu via cnauroth) + HDFS-5103. TestDirectoryScanner fails on Windows. (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java index 579ffe82f13..c5decf2eaa6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java @@ -394,12 +394,12 @@ public class TestDirectoryScanner { @Override public String getBasePath() { - return "/base"; + return (new File("/base")).getAbsolutePath(); } @Override public String getPath(String bpid) throws IOException { - return "/base/current/" + bpid; + return (new File("/base/current/" + bpid)).getAbsolutePath(); } @Override @@ -416,8 +416,6 @@ public class TestDirectoryScanner { void testScanInfoObject(long blockId, File blockFile, File metaFile) throws Exception { - assertEquals("/base/current/" + BPID_1 + "/finalized", - TEST_VOLUME.getFinalizedDir(BPID_1).getAbsolutePath()); DirectoryScanner.ScanInfo scanInfo = new DirectoryScanner.ScanInfo(blockId, blockFile, metaFile, TEST_VOLUME); assertEquals(blockId, scanInfo.getBlockId()); From 1836aceff9cc729665fa42f77111814e0f6d307b Mon Sep 17 00:00:00 2001 From: Konstantin Boudnik Date: Fri, 16 Aug 2013 05:36:53 +0000 Subject: [PATCH 020/153] Moving HDFS-5004 into 2.3.0 release section git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514583 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 44959ef4ffb..35b5fff516b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -120,9 +120,6 @@ Trunk (Unreleased) HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth) - HDFS-5004. Add additional JMX bean for NameNode status data - (Trevor Lorimer via cos) - OPTIMIZATIONS BUG FIXES @@ -259,6 +256,9 @@ Release 2.3.0 - UNRELEASED HDFS-4817. Make HDFS advisory caching configurable on a per-file basis. (Colin Patrick McCabe) + HDFS-5004. Add additional JMX bean for NameNode status data + (Trevor Lorimer via cos) + HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options. (shv) From cae55de2cd1f9ea068f3410c8bdea14cf55738cb Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Fri, 16 Aug 2013 08:11:04 +0000 Subject: [PATCH 021/153] MAPREDUCE-5462. In map-side sort, swap entire meta entries instead of indexes for better cache performance. 
(Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514608 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 ++ .../org/apache/hadoop/mapred/MapTask.java | 51 ++++++++----------- 2 files changed, 23 insertions(+), 31 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 613a38a52f5..a4123719666 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -189,6 +189,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race conditions (jlowe via kihwal) + MAPREDUCE-5462. In map-side sort, swap entire meta entries instead of + indexes for better cache performance. (Sandy Ryza) + BUG FIXES MAPREDUCE-5385. Fixed a bug with JobContext getCacheFiles API. (Omkar Vinit diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java index e9d3ed78863..99f9eac81f7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java @@ -884,10 +884,10 @@ public class MapTask extends Task { byte[] kvbuffer; // main output buffer private final byte[] b0 = new byte[0]; - private static final int INDEX = 0; // index offset in acct - private static final int VALSTART = 1; // val offset in acct - private static final int KEYSTART = 2; // key offset in acct - private static final int PARTITION = 3; // partition offset in acct + private static final int VALSTART = 0; // val offset in acct + private static final int KEYSTART = 1; // key offset in acct + private static final int PARTITION = 2; // partition offset in acct + private static final int VALLEN = 3; // length of value private static final int NMETA = 4; // num meta ints private static final int METASIZE = NMETA * 4; // size in bytes @@ -1151,10 +1151,10 @@ public class MapTask extends Task { distanceTo(keystart, valend, bufvoid)); // write accounting info - kvmeta.put(kvindex + INDEX, kvindex); kvmeta.put(kvindex + PARTITION, partition); kvmeta.put(kvindex + KEYSTART, keystart); kvmeta.put(kvindex + VALSTART, valstart); + kvmeta.put(kvindex + VALLEN, distanceTo(valstart, valend)); // advance kvindex kvindex = (kvindex - NMETA + kvmeta.capacity()) % kvmeta.capacity(); } catch (MapBufferTooSmallException e) { @@ -1224,17 +1224,11 @@ public class MapTask extends Task { } /** - * For the given meta position, return the dereferenced position in the - * integer array. Each meta block contains several integers describing - * record data in its serialized form, but the INDEX is not necessarily - * related to the proximate metadata. The index value at the referenced int - * position is the start offset of the associated metadata block. So the - * metadata INDEX at metapos may point to the metadata described by the - * metadata block at metapos + k, which contains information about that - * serialized record. + * For the given meta position, return the offset into the int-sized + * kvmeta buffer. 
*/ int offsetFor(int metapos) { - return kvmeta.get(metapos * NMETA + INDEX); + return metapos * NMETA; } /** @@ -1260,16 +1254,17 @@ public class MapTask extends Task { kvmeta.get(kvj + VALSTART) - kvmeta.get(kvj + KEYSTART)); } + final byte META_BUFFER_TMP[] = new byte[METASIZE]; /** - * Swap logical indices st i, j MOD offset capacity. + * Swap metadata for items i, j * @see IndexedSortable#swap */ public void swap(final int mi, final int mj) { - final int kvi = (mi % maxRec) * NMETA + INDEX; - final int kvj = (mj % maxRec) * NMETA + INDEX; - int tmp = kvmeta.get(kvi); - kvmeta.put(kvi, kvmeta.get(kvj)); - kvmeta.put(kvj, tmp); + int iOff = (mi % maxRec) * METASIZE; + int jOff = (mj % maxRec) * METASIZE; + System.arraycopy(kvbuffer, iOff, META_BUFFER_TMP, 0, METASIZE); + System.arraycopy(kvbuffer, jOff, kvbuffer, iOff, METASIZE); + System.arraycopy(META_BUFFER_TMP, 0, kvbuffer, jOff, METASIZE); } /** @@ -1601,9 +1596,9 @@ public class MapTask extends Task { while (spindex < mend && kvmeta.get(offsetFor(spindex % maxRec) + PARTITION) == i) { final int kvoff = offsetFor(spindex % maxRec); - key.reset(kvbuffer, kvmeta.get(kvoff + KEYSTART), - (kvmeta.get(kvoff + VALSTART) - - kvmeta.get(kvoff + KEYSTART))); + int keystart = kvmeta.get(kvoff + KEYSTART); + int valstart = kvmeta.get(kvoff + VALSTART); + key.reset(kvbuffer, keystart, valstart - keystart); getVBytesForOffset(kvoff, value); writer.append(key, value); ++spindex; @@ -1729,14 +1724,8 @@ public class MapTask extends Task { private void getVBytesForOffset(int kvoff, InMemValBytes vbytes) { // get the keystart for the next serialized value to be the end // of this value. If this is the last value in the buffer, use bufend - final int nextindex = kvoff == kvend - ? bufend - : kvmeta.get( - (kvoff - NMETA + kvmeta.capacity() + KEYSTART) % kvmeta.capacity()); - // calculate the length of the value - int vallen = (nextindex >= kvmeta.get(kvoff + VALSTART)) - ? nextindex - kvmeta.get(kvoff + VALSTART) - : (bufvoid - kvmeta.get(kvoff + VALSTART)) + nextindex; + final int vallen = kvmeta.get(kvoff + VALLEN); + assert vallen >= 0; vbytes.reset(kvbuffer, kvmeta.get(kvoff + VALSTART), vallen); } From 45694cce8e2abed07511c385bce9c7c387faf8f5 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Fri, 16 Aug 2013 16:13:55 +0000 Subject: [PATCH 022/153] Update CHANGES.txt to reflect merge of MR-1981 to branch-2.1-beta git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514768 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index a4123719666..7125f28e65c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -162,9 +162,6 @@ Release 2.3.0 - UNRELEASED OPTIMIZATIONS - MAPREDUCE-1981. Improve getSplits performance by using listLocatedStatus - (Hairong Kuang and Jason Lowe via jlowe) - BUG FIXES MAPREDUCE-5316. job -list-attempt-ids command does not handle illegal @@ -192,6 +189,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5462. In map-side sort, swap entire meta entries instead of indexes for better cache performance. (Sandy Ryza) + MAPREDUCE-1981. Improve getSplits performance by using listLocatedStatus + (Hairong Kuang and Jason Lowe via jlowe) + BUG FIXES MAPREDUCE-5385. Fixed a bug with JobContext getCacheFiles API. 
(Omkar Vinit From 8d21926c2613062149d07d238022f993af4c9c03 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 16 Aug 2013 17:14:34 +0000 Subject: [PATCH 023/153] HDFS-5102. Snapshot names should not be allowed to contain slash characters. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514797 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../hdfs/server/namenode/FSDirectory.java | 4 +++ .../namenode/snapshot/TestSnapshot.java | 33 +++++++++++++++++ .../namenode/snapshot/TestSnapshotRename.java | 36 +++++++++++++++++++ 4 files changed, 76 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 35b5fff516b..00e206d00a1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -350,6 +350,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5103. TestDirectoryScanner fails on Windows. (Chuan Liu via cnauroth) + HDFS-5102. Snapshot names should not be allowed to contain slash characters. + (jing9) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 9523a50a47d..532a2bfb218 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2093,6 +2093,10 @@ public class FSDirectory implements Closeable { /** Verify if the snapshot name is legal. */ void verifySnapshotName(String snapshotName, String path) throws PathComponentTooLongException { + if (snapshotName.contains(Path.SEPARATOR)) { + throw new HadoopIllegalArgumentException( + "Snapshot name cannot contain \"" + Path.SEPARATOR + "\""); + } final byte[] bytes = DFSUtil.string2Bytes(snapshotName); verifyINodeName(bytes); verifyMaxComponentLength(bytes, path, 0); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java index c84af965b79..27228bd0482 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; @@ -54,6 +55,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDi import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper.TestDirectoryTree.Node; import org.apache.hadoop.hdfs.tools.offlineImageViewer.OfflineImageViewer; import org.apache.hadoop.hdfs.tools.offlineImageViewer.XmlImageVisitor; +import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; import 
org.apache.hadoop.util.Time; import org.apache.log4j.Level; @@ -341,6 +343,37 @@ public class TestSnapshot { assertEquals(oldStatus.getAccessTime(), snapshotStatus.getAccessTime()); } + /** + * Test creating a snapshot with illegal name + */ + @Test + public void testCreateSnapshotWithIllegalName() throws Exception { + final Path dir = new Path("/dir"); + hdfs.mkdirs(dir); + + final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR; + try { + hdfs.createSnapshot(dir, name1); + fail("Exception expected when an illegal name is given"); + } catch (RemoteException e) { + String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + + "\" is a reserved name."; + GenericTestUtils.assertExceptionContains(errorMsg, e); + } + + String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\""; + final String[] badNames = new String[] { "foo" + Path.SEPARATOR, + Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" }; + for (String badName : badNames) { + try { + hdfs.createSnapshot(dir, badName); + fail("Exception expected when an illegal name is given"); + } catch (RemoteException e) { + GenericTestUtils.assertExceptionContains(errorMsg, e); + } + } + } + /** * Creating snapshots for a directory that is not snapshottable must fail. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java index ab1752a166d..386563bea1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java @@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.snapshot; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import java.util.List; @@ -29,11 +30,14 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DistributedFileSystem; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.SnapshotException; import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiff; import org.apache.hadoop.hdfs.util.ReadOnlyList; +import org.apache.hadoop.ipc.RemoteException; +import org.apache.hadoop.test.GenericTestUtils; import org.junit.After; import org.junit.Before; import org.junit.Rule; @@ -190,4 +194,36 @@ public class TestSnapshotRename { exception.expectMessage(error); hdfs.renameSnapshot(sub1, "s1", "s2"); } + + /** + * Test renaming a snapshot with illegal name + */ + @Test + public void testRenameWithIllegalName() throws Exception { + DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed); + // Create snapshots for sub1 + SnapshotTestHelper.createSnapshot(hdfs, sub1, "s1"); + + final String name1 = HdfsConstants.DOT_SNAPSHOT_DIR; + try { + hdfs.renameSnapshot(sub1, "s1", name1); + fail("Exception expected when an illegal name is given for rename"); + } catch (RemoteException e) { + String errorMsg = "\"" + HdfsConstants.DOT_SNAPSHOT_DIR + + "\" is a reserved name."; + 
GenericTestUtils.assertExceptionContains(errorMsg, e); + } + + String errorMsg = "Snapshot name cannot contain \"" + Path.SEPARATOR + "\""; + final String[] badNames = new String[] { "foo" + Path.SEPARATOR, + Path.SEPARATOR + "foo", Path.SEPARATOR, "foo" + Path.SEPARATOR + "bar" }; + for (String badName : badNames) { + try { + hdfs.renameSnapshot(sub1, "s1", badName); + fail("Exception expected when an illegal name is given"); + } catch (RemoteException e) { + GenericTestUtils.assertExceptionContains(errorMsg, e); + } + } + } } From 218ea7380c69e1a2cd73482fba523fee33644288 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Fri, 16 Aug 2013 19:04:43 +0000 Subject: [PATCH 024/153] HDFS-5105. TestFsck fails on Windows. (Contributed by Chuan Liu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514852 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../hadoop/hdfs/server/namenode/TestFsck.java | 42 ++++++++++++------- 2 files changed, 29 insertions(+), 15 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 00e206d00a1..3df060e5fd6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -353,6 +353,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5102. Snapshot names should not be allowed to contain slash characters. (jing9) + HDFS-5105. TestFsck fails on Windows. (Chuan Liu via arp) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 931351d4ac7..754e56966d3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -153,8 +153,8 @@ public class TestFsck { String outStr = runFsck(conf, 0, true, "/"); verifyAuditLogs(); assertEquals(aTime, fs.getFileStatus(file).getAccessTime()); - assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); System.out.println(outStr); + assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); if (fs != null) {try{fs.close();} catch(Exception e){}} cluster.shutdown(); @@ -194,18 +194,30 @@ public class TestFsck { // Turn off the logs Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger(); logger.setLevel(Level.OFF); - - // Audit log should contain one getfileinfo and one fsck - BufferedReader reader = new BufferedReader(new FileReader(auditLogFile)); - String line = reader.readLine(); - assertNotNull(line); - assertTrue("Expected getfileinfo event not found in audit log", - getfileinfoPattern.matcher(line).matches()); - line = reader.readLine(); - assertNotNull(line); - assertTrue("Expected fsck event not found in audit log", - fsckPattern.matcher(line).matches()); - assertNull("Unexpected event in audit log", reader.readLine()); + + BufferedReader reader = null; + try { + // Audit log should contain one getfileinfo and one fsck + reader = new BufferedReader(new FileReader(auditLogFile)); + String line = reader.readLine(); + assertNotNull(line); + assertTrue("Expected getfileinfo event not found in audit log", + getfileinfoPattern.matcher(line).matches()); + line = reader.readLine(); + assertNotNull(line); + assertTrue("Expected fsck event not found in audit log", fsckPattern + 
.matcher(line).matches()); + assertNull("Unexpected event in audit log", reader.readLine()); + } finally { + // Close the reader and remove the appender to release the audit log file + // handle after verifying the content of the file. + if (reader != null) { + reader.close(); + } + if (logger != null) { + logger.removeAllAppenders(); + } + } } @Test @@ -963,9 +975,9 @@ public class TestFsck { String outStr = runFsck(conf, 0, true, "/"); verifyAuditLogs(); assertEquals(aTime, fc.getFileStatus(symlink).getAccessTime()); - assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); - assertTrue(outStr.contains("Total symlinks:\t\t1\n")); System.out.println(outStr); + assertTrue(outStr.contains(NamenodeFsck.HEALTHY_STATUS)); + assertTrue(outStr.contains("Total symlinks:\t\t1")); util.cleanup(fs, fileName); } finally { if (fs != null) {try{fs.close();} catch(Exception e){}} From 9ba95136e2e06e1a4ca94a87ba89eae8050a9522 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 16 Aug 2013 22:28:41 +0000 Subject: [PATCH 025/153] HDFS-5106. TestDatanodeBlockScanner fails on Windows due to incorrect path format. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514911 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java | 3 ++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3df060e5fd6..f397a980a50 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -355,6 +355,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5105. TestFsck fails on Windows. (Chuan Liu via arp) + HDFS-5106. TestDatanodeBlockScanner fails on Windows due to incorrect path + format. (Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java index 30cb035ff90..eb28a14e5f3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java @@ -440,7 +440,8 @@ public class TestDatanodeBlockScanner { } } - private static final String BASE_PATH = "/data/current/finalized"; + private static final String BASE_PATH = (new File("/data/current/finalized")) + .getAbsolutePath(); @Test public void testReplicaInfoParsing() throws Exception { From 52f0259502de42e433588c299339bf5cd4ba1f8e Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 16 Aug 2013 22:35:19 +0000 Subject: [PATCH 026/153] HADOOP-9880. SASL changes from HADOOP-9421 breaks Secure HA NN. Contributed by Daryn Sharp. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514913 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/ipc/Server.java | 10 ++++++++- .../apache/hadoop/security/SaslRpcServer.java | 1 - .../DelegationTokenSecretManager.java | 22 +++++++++++++++++++ 4 files changed, 34 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e18d4584299..92d0df40f5d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -399,6 +399,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9868. Server must not advertise kerberos realm. (daryn via kihwal) + HADOOP-9880. SASL changes from HADOOP-9421 breaks Secure HA NN. (daryn via + jing9) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java index 1533b3d00c6..de43646a204 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java @@ -1311,7 +1311,15 @@ public abstract class Server { Throwable cause = e; while (cause != null) { if (cause instanceof InvalidToken) { - sendToClient = (InvalidToken) cause; + // FIXME: hadoop method signatures are restricting the SASL + // callbacks to only returning InvalidToken, but some services + // need to throw other exceptions (ex. NN + StandyException), + // so for now we'll tunnel the real exceptions via an + // InvalidToken's cause which normally is not set + if (cause.getCause() != null) { + cause = cause.getCause(); + } + sendToClient = (IOException) cause; break; } cause = cause.getCause(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java index 9408028ffa2..2390dfcd658 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SaslRpcServer.java @@ -127,7 +127,6 @@ public class SaslRpcServer { final CallbackHandler callback; switch (authMethod) { case TOKEN: { - secretManager.checkAvailableForRead(); callback = new SaslDigestCallbackHandler(secretManager, connection); break; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 98fb76216ce..17e2ccc61ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -81,6 +81,28 @@ public class DelegationTokenSecretManager return new DelegationTokenIdentifier(); } + @Override + public synchronized byte[] retrievePassword( + DelegationTokenIdentifier identifier) throws InvalidToken { + try { + // this check introduces inconsistency in the authentication to a + // HA standby NN. 
non-token auths are allowed into the namespace which + // decides whether to throw a StandbyException. tokens are a bit + // different in that a standby may be behind and thus not yet know + // of all tokens issued by the active NN. the following check does + // not allow ANY token auth, however it should allow known tokens in + checkAvailableForRead(); + } catch (StandbyException se) { + // FIXME: this is a hack to get around changing method signatures by + // tunneling a non-InvalidToken exception as the cause which the + // RPC server will unwrap before returning to the client + InvalidToken wrappedStandby = new InvalidToken("StandbyException"); + wrappedStandby.initCause(se); + throw wrappedStandby; + } + return super.retrievePassword(identifier); + } + @Override //SecretManager public void checkAvailableForRead() throws StandbyException { namesystem.checkOperation(OperationCategory.READ); From 1ad3fe46332586cea73c47ba06342f91359db561 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 16 Aug 2013 23:00:54 +0000 Subject: [PATCH 027/153] YARN-107. Fixed ResourceManager and clients to better handle forceKillApplication on non-running and finished applications. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1514918 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 +++ .../yarn/client/cli/ApplicationCLI.java | 12 +++++-- .../hadoop/yarn/client/cli/TestYarnCLI.java | 35 +++++++++++++++++++ .../resourcemanager/ClientRMService.java | 5 ++- .../resourcemanager/TestClientRMService.java | 22 ++++++++++++ 5 files changed, 73 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 9d10e2a6efe..e276d19489e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -70,6 +70,10 @@ Release 2.1.1-beta - UNRELEASED YARN-337. RM handles killed application tracking URL poorly (jlowe) + YARN-107. Fixed ResourceManager and clients to better handle + forceKillApplication on non-running and finished applications. 
(Xuan Gong + via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index fa22b29ddb9..16e55a6a72d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -35,6 +35,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.util.ToolRunner; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.util.ConverterUtils; @@ -164,8 +165,15 @@ public class ApplicationCLI extends YarnCLI { private void killApplication(String applicationId) throws YarnException, IOException { ApplicationId appId = ConverterUtils.toApplicationId(applicationId); - sysout.println("Killing application " + applicationId); - client.killApplication(appId); + ApplicationReport appReport = client.getApplicationReport(appId); + if (appReport.getYarnApplicationState() == YarnApplicationState.FINISHED + || appReport.getYarnApplicationState() == YarnApplicationState.KILLED + || appReport.getYarnApplicationState() == YarnApplicationState.FAILED) { + sysout.println("Application " + applicationId + " has already finished "); + } else { + sysout.println("Killing application " + applicationId); + client.killApplication(appId); + } } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 4bc405d67c4..8be8b68e491 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -26,6 +26,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.doThrow; import java.io.ByteArrayOutputStream; import java.io.PrintStream; @@ -320,10 +321,44 @@ public class TestYarnCLI { public void testKillApplication() throws Exception { ApplicationCLI cli = createAndGetAppCLI(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); + + ApplicationReport newApplicationReport2 = ApplicationReport.newInstance( + applicationId, ApplicationAttemptId.newInstance(applicationId, 1), + "user", "queue", "appname", "host", 124, null, + YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, + FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); + when(client.getApplicationReport(any(ApplicationId.class))).thenReturn( + newApplicationReport2); int result = cli.run(new String[] { "-kill", applicationId.toString() }); assertEquals(0, result); + verify(client, times(0)).killApplication(any(ApplicationId.class)); + verify(sysOut).println( + "Application " + applicationId + " has already finished "); + + 
ApplicationReport newApplicationReport = ApplicationReport.newInstance( + applicationId, ApplicationAttemptId.newInstance(applicationId, 1), + "user", "queue", "appname", "host", 124, null, + YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, + FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); + when(client.getApplicationReport(any(ApplicationId.class))).thenReturn( + newApplicationReport); + result = cli.run(new String[] { "-kill", applicationId.toString() }); + assertEquals(0, result); verify(client).killApplication(any(ApplicationId.class)); verify(sysOut).println("Killing application application_1234_0005"); + + doThrow(new ApplicationNotFoundException("Application with id '" + + applicationId + "' doesn't exist in RM.")).when(client) + .getApplicationReport(applicationId); + cli = createAndGetAppCLI(); + try { + cli.run(new String[] { "-kill", applicationId.toString() }); + Assert.fail(); + } catch (Exception ex) { + Assert.assertTrue(ex instanceof ApplicationNotFoundException); + Assert.assertEquals("Application with id '" + applicationId + + "' doesn't exist in RM.", ex.getMessage()); + } } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 1f7a8477d6e..97f0ef8e0b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -353,9 +353,8 @@ public class ClientRMService extends AbstractService implements RMAuditLogger.logFailure(callerUGI.getUserName(), AuditConstants.KILL_APP_REQUEST, "UNKNOWN", "ClientRMService", "Trying to kill an absent application", applicationId); - throw RPCUtil - .getRemoteException("Trying to kill an absent application " - + applicationId); + throw new ApplicationNotFoundException("Trying to kill an absent" + + " application " + applicationId); } if (!checkAccess(callerUGI, application.getUser(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 4817f45e0eb..ff3c3aadda1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetQueueInfoResponse; +import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import 
org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; @@ -197,6 +198,27 @@ public class TestClientRMService { } } + @Test + public void testForceKillApplication() throws YarnException { + RMContext rmContext = mock(RMContext.class); + when(rmContext.getRMApps()).thenReturn( + new ConcurrentHashMap()); + ClientRMService rmService = new ClientRMService(rmContext, null, null, + null, null); + ApplicationId applicationId = + BuilderUtils.newApplicationId(System.currentTimeMillis(), 0); + KillApplicationRequest request = + KillApplicationRequest.newInstance(applicationId); + try { + rmService.forceKillApplication(request); + Assert.fail(); + } catch (ApplicationNotFoundException ex) { + Assert.assertEquals(ex.getMessage(), + "Trying to kill an absent " + + "application " + request.getApplicationId()); + } + } + @Test public void testGetQueueInfo() throws Exception { YarnScheduler yarnScheduler = mock(YarnScheduler.class); From 214d4377fc151297c85b09273dfe8fdddae40d3d Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Sat, 17 Aug 2013 21:16:50 +0000 Subject: [PATCH 028/153] HDFS-5104 Support dotdot name in NFS LOOKUP operation. Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515042 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../java/org/apache/hadoop/hdfs/DFSUtil.java | 14 ++- .../hdfs/server/namenode/FSDirectory.java | 13 +++ .../hdfs/server/namenode/TestINodeFile.java | 87 +++++++++++++------ 4 files changed, 87 insertions(+), 29 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f397a980a50..38da72197ab 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -291,6 +291,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5076 Add MXBean methods to query NN's transaction information and JournalNode's journal status. (jing9) + HDFS-5104 Support dotdot name in NFS LOOKUP operation (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index fd1fe04e0f8..e3b61abc0bd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -70,6 +70,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocolPB.ClientDatanodeProtocolTranslatorPB; +import org.apache.hadoop.hdfs.server.namenode.FSDirectory; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; @@ -204,13 +205,20 @@ public class DFSUtil { String[] components = StringUtils.split(src, '/'); for (int i = 0; i < components.length; i++) { String element = components[i]; - if (element.equals("..") || - element.equals(".") || + if (element.equals(".") || (element.indexOf(":") >= 0) || (element.indexOf("/") >= 0)) { return false; } - + // ".." 
is allowed in path starting with /.reserved/.inodes + if (element.equals("..")) { + if (components.length > 4 + && components[1].equals(FSDirectory.DOT_RESERVED_STRING) + && components[2].equals(FSDirectory.DOT_INODES_STRING)) { + continue; + } + return false; + } // The string may start or end with a /, but not have // "//" in the middle. if (element.isEmpty() && i != components.length - 1 && diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 532a2bfb218..51642a8b23a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -2730,6 +2730,19 @@ public class FSDirectory implements Closeable { throw new FileNotFoundException( "File for given inode path does not exist: " + src); } + + // Handle single ".." for NFS lookup support. + if ((pathComponents.length > 4) + && DFSUtil.bytes2String(pathComponents[4]).equals("..")) { + INode parent = inode.getParent(); + if (parent == null || parent.getId() == INodeId.ROOT_INODE_ID) { + // inode is root, or its parent is root. + return Path.SEPARATOR; + } else { + return parent.getFullPathName(); + } + } + StringBuilder path = id == INodeId.ROOT_INODE_ID ? new StringBuilder() : new StringBuilder(inode.getFullPathName()); for (int i = 4; i < pathComponents.length; i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java index 4d1a9a3f9b7..aa12a231fc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java @@ -45,6 +45,7 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.fs.permission.PermissionStatus; +import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.DFSUtil; @@ -901,31 +902,65 @@ public class TestINodeFile { @Test public void testInodeReplacement() throws Exception { final Configuration conf = new Configuration(); - final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf). 
- numDataNodes(1).build(); - cluster.waitActive(); - final DistributedFileSystem hdfs = cluster.getFileSystem(); - final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory(); - - final Path dir = new Path("/dir"); - hdfs.mkdirs(dir); - INode dirNode = fsdir.getINode(dir.toString()); - INode dirNodeFromNode = fsdir.getInode(dirNode.getId()); - assertSame(dirNode, dirNodeFromNode); - - // set quota to dir, which leads to node replacement - hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); - dirNode = fsdir.getINode(dir.toString()); - assertTrue(dirNode instanceof INodeDirectoryWithQuota); - // the inode in inodeMap should also be replaced - dirNodeFromNode = fsdir.getInode(dirNode.getId()); - assertSame(dirNode, dirNodeFromNode); - - hdfs.setQuota(dir, -1, -1); - dirNode = fsdir.getINode(dir.toString()); - assertTrue(dirNode instanceof INodeDirectory); - // the inode in inodeMap should also be replaced - dirNodeFromNode = fsdir.getInode(dirNode.getId()); - assertSame(dirNode, dirNodeFromNode); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + final DistributedFileSystem hdfs = cluster.getFileSystem(); + final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory(); + + final Path dir = new Path("/dir"); + hdfs.mkdirs(dir); + INode dirNode = fsdir.getINode(dir.toString()); + INode dirNodeFromNode = fsdir.getInode(dirNode.getId()); + assertSame(dirNode, dirNodeFromNode); + + // set quota to dir, which leads to node replacement + hdfs.setQuota(dir, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + dirNode = fsdir.getINode(dir.toString()); + assertTrue(dirNode instanceof INodeDirectoryWithQuota); + // the inode in inodeMap should also be replaced + dirNodeFromNode = fsdir.getInode(dirNode.getId()); + assertSame(dirNode, dirNodeFromNode); + + hdfs.setQuota(dir, -1, -1); + dirNode = fsdir.getINode(dir.toString()); + assertTrue(dirNode instanceof INodeDirectory); + // the inode in inodeMap should also be replaced + dirNodeFromNode = fsdir.getInode(dirNode.getId()); + assertSame(dirNode, dirNodeFromNode); + } finally { + cluster.shutdown(); + } + } + + @Test + public void testDotdotInodePath() throws Exception { + final Configuration conf = new Configuration(); + MiniDFSCluster cluster = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + cluster.waitActive(); + final DistributedFileSystem hdfs = cluster.getFileSystem(); + final FSDirectory fsdir = cluster.getNamesystem().getFSDirectory(); + + final Path dir = new Path("/dir"); + hdfs.mkdirs(dir); + long dirId = fsdir.getINode(dir.toString()).getId(); + long parentId = fsdir.getINode("/").getId(); + String testPath = "/.reserved/.inodes/" + dirId + "/.."; + + DFSClient client = new DFSClient(NameNode.getAddress(conf), conf); + HdfsFileStatus status = client.getFileInfo(testPath); + assertTrue(parentId == status.getFileId()); + + // Test root's parent is still root + testPath = "/.reserved/.inodes/" + parentId + "/.."; + status = client.getFileInfo(testPath); + assertTrue(parentId == status.getFileId()); + + } finally { + cluster.shutdown(); + } } } From b7fb6fd6c45b0f8f78f6534fc169317f5702b72a Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Sun, 18 Aug 2013 16:49:27 +0000 Subject: [PATCH 029/153] HDFS-5107 Fix array copy error in Readdir and Readdirplus responses. 
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515166 13f79535-47bb-0310-9956-ffa450edef68 --- .../nfs/nfs3/response/READDIR3Response.java | 20 +++++++++---------- .../nfs3/response/READDIRPLUS3Response.java | 18 ++++++++--------- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java index fa54c5459fb..9f8d6760b5b 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIR3Response.java @@ -17,12 +17,14 @@ */ package org.apache.hadoop.nfs.nfs3.response; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.oncrpc.XDR; -import com.google.common.collect.ObjectArrays; - /** * READDIR3 Response */ @@ -56,12 +58,11 @@ public class READDIR3Response extends NFS3Response { } public static class DirList3 { - final Entry3 entries[]; + final List entries; final boolean eof; public DirList3(Entry3[] entries, boolean eof) { - this.entries = ObjectArrays.newArray(entries, entries.length); - System.arraycopy(this.entries, 0, entries, 0, entries.length); + this.entries = Collections.unmodifiableList(Arrays.asList(entries)); this.eof = eof; } } @@ -102,12 +103,11 @@ public class READDIR3Response extends NFS3Response { if (getStatus() == Nfs3Status.NFS3_OK) { xdr.writeLongAsHyper(cookieVerf); - Entry3[] f = dirList.entries; - for (int i = 0; i < f.length; i++) { + for (Entry3 e : dirList.entries) { xdr.writeBoolean(true); // Value follows - xdr.writeLongAsHyper(f[i].getFileId()); - xdr.writeString(f[i].getName()); - xdr.writeLongAsHyper(f[i].getCookie()); + xdr.writeLongAsHyper(e.getFileId()); + xdr.writeString(e.getName()); + xdr.writeLongAsHyper(e.getCookie()); } xdr.writeBoolean(false); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java index 77794cf48a6..6b41cb27f7a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/response/READDIRPLUS3Response.java @@ -17,13 +17,15 @@ */ package org.apache.hadoop.nfs.nfs3.response; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; + import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3FileAttributes; import org.apache.hadoop.nfs.nfs3.Nfs3Status; import org.apache.hadoop.oncrpc.XDR; -import com.google.common.collect.ObjectArrays; - /** * READDIRPLUS3 Response */ @@ -60,16 +62,15 @@ public class READDIRPLUS3Response extends NFS3Response { } public static class DirListPlus3 { - EntryPlus3 entries[]; + List entries; boolean eof; public DirListPlus3(EntryPlus3[] entries, boolean eof) { - this.entries = ObjectArrays.newArray(entries, entries.length); - System.arraycopy(this.entries, 0, entries, 0, entries.length); + this.entries = 
Collections.unmodifiableList(Arrays.asList(entries)); this.eof = eof; } - EntryPlus3[] getEntries() { + List getEntries() { return entries; } @@ -101,10 +102,9 @@ public class READDIRPLUS3Response extends NFS3Response { if (getStatus() == Nfs3Status.NFS3_OK) { out.writeLongAsHyper(cookieVerf); - EntryPlus3[] f = dirListPlus.getEntries(); - for (int i = 0; i < f.length; i++) { + for (EntryPlus3 f : dirListPlus.getEntries()) { out.writeBoolean(true); // next - f[i].seralize(out); + f.seralize(out); } out.writeBoolean(false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 38da72197ab..242b2b256a4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -293,6 +293,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5104 Support dotdot name in NFS LOOKUP operation (brandonli) + HDFS-5107 Fix array copy error in Readdir and Readdirplus responses + (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 79a184505d5ed71125a92f9c236fcf93b13f954e Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 19 Aug 2013 01:53:34 +0000 Subject: [PATCH 030/153] YARN-643. Fixed ResourceManager to remove all tokens consistently on app finish. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515256 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../rmapp/attempt/RMAppAttemptImpl.java | 30 ++++++++-------- .../attempt/TestRMAppAttemptTransitions.java | 35 ++++++++++++++++--- .../security/TestAMRMTokens.java | 19 ++++++++++ 4 files changed, 68 insertions(+), 19 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index e276d19489e..dc178d7143a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -74,6 +74,9 @@ Release 2.1.1-beta - UNRELEASED forceKillApplication on non-running and finished applications. (Xuan Gong via vinodkv) + YARN-643. Fixed ResourceManager to remove all tokens consistently on app + finish. (Xuan Gong via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index e287c203728..1543110db03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -761,6 +761,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { rejectedEvent.getApplicationAttemptId().getApplicationId(), message) ); + + appAttempt.removeTokens(appAttempt); } } @@ -847,7 +849,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { @Override public void transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) { - ApplicationAttemptId appAttemptId = appAttempt.getAppAttemptId(); // Tell the AMS. 
Unregister from the ApplicationMasterService @@ -894,9 +895,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.eventHandler.handle(new AppRemovedSchedulerEvent(appAttemptId, finalAttemptState)); - // Remove the AppAttempt from the AMRMTokenSecretManager - appAttempt.rmContext.getAMRMTokenSecretManager() - .applicationMasterFinished(appAttemptId); + appAttempt.removeTokens(appAttempt); } } @@ -1015,7 +1014,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { " exitCode: " + status.getExitStatus() + " due to: " + status.getDiagnostics() + "." + "Failing this attempt."); - // Tell the app, scheduler super.transition(appAttempt, finishEvent); } @@ -1042,12 +1040,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.rmContext.getAMFinishingMonitor().unregister( appAttempt.getAppAttemptId()); - // Unregister from the ClientToAMTokenSecretManager - if (UserGroupInformation.isSecurityEnabled()) { - appAttempt.rmContext.getClientToAMTokenSecretManager() - .unRegisterApplication(appAttempt.getAppAttemptId()); - } - if(!appAttempt.submissionContext.getUnmanagedAM()) { // Tell the launcher to cleanup. appAttempt.eventHandler.handle(new AMLauncherEvent( @@ -1116,10 +1108,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.rmContext.getAMLivelinessMonitor().unregister(appAttemptId); - // Remove the AppAttempt from the AMRMTokenSecretManager - appAttempt.rmContext.getAMRMTokenSecretManager() - .applicationMasterFinished(appAttemptId); - appAttempt.progress = 1.0f; RMAppAttemptUnregistrationEvent unregisterEvent @@ -1267,4 +1255,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { + " MasterContainer: " + masterContainer); store.storeApplicationAttempt(this); } + + private void removeTokens(RMAppAttemptImpl appAttempt) { + // Unregister from the ClientToAMTokenSecretManager + if (UserGroupInformation.isSecurityEnabled()) { + appAttempt.rmContext.getClientToAMTokenSecretManager() + .unRegisterApplication(appAttempt.getAppAttemptId()); + } + + // Remove the AppAttempt from the AMRMTokenSecretManager + appAttempt.rmContext.getAMRMTokenSecretManager() + .applicationMasterFinished(appAttempt.getAppAttemptId()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index d61b5c9e6f6..5261d077d5c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -28,6 +28,7 @@ import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.spy; import java.util.Collections; import java.util.List; @@ -35,6 +36,7 @@ import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import 
org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -102,6 +104,11 @@ public class TestRMAppAttemptTransitions { private RMApp application; private RMAppAttempt applicationAttempt; + + private Configuration conf = new Configuration(); + private AMRMTokenSecretManager amRMTokenManager = spy(new AMRMTokenSecretManager(conf)); + private ClientToAMTokenSecretManagerInRM clientToAMTokenManager = + spy(new ClientToAMTokenSecretManagerInRM()); private final class TestApplicationAttemptEventDispatcher implements EventHandler { @@ -163,14 +170,13 @@ public class TestRMAppAttemptTransitions { mock(ContainerAllocationExpirer.class); amLivelinessMonitor = mock(AMLivelinessMonitor.class); amFinishingMonitor = mock(AMLivelinessMonitor.class); - Configuration conf = new Configuration(); rmContext = new RMContextImpl(rmDispatcher, containerAllocationExpirer, amLivelinessMonitor, amFinishingMonitor, - null, new AMRMTokenSecretManager(conf), + null, amRMTokenManager, new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), - new ClientToAMTokenSecretManagerInRM()); + clientToAMTokenManager); RMStateStore store = mock(RMStateStore.class); ((RMContextImpl) rmContext).setStateStore(store); @@ -261,7 +267,11 @@ public class TestRMAppAttemptTransitions { assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001); assertEquals(0, applicationAttempt.getRanNodes().size()); assertNull(applicationAttempt.getFinalApplicationStatus()); - + if (UserGroupInformation.isSecurityEnabled()) { + verify(clientToAMTokenManager).registerApplication( + applicationAttempt.getAppAttemptId()); + } + assertNotNull(applicationAttempt.getAMRMToken()); // Check events verify(masterService). 
registerAppAttempt(applicationAttempt.getAppAttemptId()); @@ -288,6 +298,7 @@ public class TestRMAppAttemptTransitions { // this works for unmanaged and managed AM's because this is actually doing // verify(application).handle(anyObject()); verify(application).handle(any(RMAppRejectedEvent.class)); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } /** @@ -303,6 +314,7 @@ public class TestRMAppAttemptTransitions { assertEquals(0.0, (double)applicationAttempt.getProgress(), 0.0001); assertEquals(0, applicationAttempt.getRanNodes().size()); assertNull(applicationAttempt.getFinalApplicationStatus()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } /** @@ -377,6 +389,8 @@ public class TestRMAppAttemptTransitions { // Check events verify(application, times(2)).handle(any(RMAppFailedAttemptEvent.class)); + + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } /** @@ -422,6 +436,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getTrackingUrl()); assertEquals(container, applicationAttempt.getMasterContainer()); assertEquals(finalStatus, applicationAttempt.getFinalApplicationStatus()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 0); } /** @@ -442,6 +457,7 @@ public class TestRMAppAttemptTransitions { .getJustFinishedContainers().size()); assertEquals(container, applicationAttempt.getMasterContainer()); assertEquals(finalStatus, applicationAttempt.getFinalApplicationStatus()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @@ -592,6 +608,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.KILL)); testAppAttemptKilledState(null, EMPTY_DIAGNOSTICS); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test @@ -666,6 +683,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId(), cs)); assertEquals(RMAppAttemptState.FAILED, applicationAttempt.getAppAttemptState()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test @@ -709,6 +727,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test(timeout=10000) @@ -725,6 +744,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test(timeout=20000) @@ -742,6 +762,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId().getApplicationId()); assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl()); assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl()); + verifyTokenCount(applicationAttempt.getAppAttemptId(), 1); } @Test @@ -848,4 +869,10 @@ public class TestRMAppAttemptTransitions { diagnostics, 0); } + private void verifyTokenCount(ApplicationAttemptId appAttemptId, int count) { + verify(amRMTokenManager, times(count)).applicationMasterFinished(appAttemptId); + if (UserGroupInformation.isSecurityEnabled()) { + verify(clientToAMTokenManager, times(count)).unRegisterApplication(appAttemptId); + } + } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java index b0c04886945..aa894c5f6a9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java @@ -38,6 +38,8 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; @@ -46,6 +48,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MockRMW import org.apache.hadoop.yarn.server.resourcemanager.TestAMAuthorization.MyContainerManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptContainerFinishedEvent; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; import org.junit.Assert; import org.junit.Test; @@ -80,6 +84,7 @@ public class TestAMRMTokens { * * @throws Exception */ + @SuppressWarnings("unchecked") @Test public void testTokenExpiry() throws Exception { @@ -134,6 +139,20 @@ public class TestAMRMTokens { finishAMRequest.setTrackingUrl("url"); rmClient.finishApplicationMaster(finishAMRequest); + // Send RMAppAttemptEventType.CONTAINER_FINISHED to transit RMAppAttempt + // from Finishing state to Finished State. Both AMRMToken and + // ClientToAMToken will be removed. + ContainerStatus containerStatus = + BuilderUtils.newContainerStatus(attempt.getMasterContainer().getId(), + ContainerState.COMPLETE, + "AM Container Finished", 0); + rm.getRMContext() + .getDispatcher() + .getEventHandler() + .handle( + new RMAppAttemptContainerFinishedEvent(applicationAttemptId, + containerStatus)); + // Now simulate trying to allocate. RPC call itself should throw auth // exception. rpc.stopProxy(rmClient, conf); // To avoid using cached client From be0317e0211e9b107dd25e5f492cdbc0493ec5e0 Mon Sep 17 00:00:00 2001 From: Tsz-wo Sze Date: Mon, 19 Aug 2013 06:44:38 +0000 Subject: [PATCH 031/153] Add .classpath, .project and .settings to svn:ignore. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515283 13f79535-47bb-0310-9956-ffa450edef68 From c9b89de0eacf15f21faa3a7ba30d4773f571c9a4 Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Mon, 19 Aug 2013 21:54:51 +0000 Subject: [PATCH 032/153] HDFS-5110 Change FSDataOutputStream to HdfsDataOutputStream for opened streams to fix type cast error. 
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515624 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 9 ++++----- .../org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 7 ++++--- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 3 files changed, 11 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index 301fedc508c..e13bebcc6f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -32,7 +32,6 @@ import java.util.concurrent.locks.ReentrantLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; @@ -70,7 +69,7 @@ class OpenFileCtx { // The stream write-back status. True means one thread is doing write back. private boolean asyncStatus; - private final FSDataOutputStream fos; + private final HdfsDataOutputStream fos; private final Nfs3FileAttributes latestAttr; private long nextOffset; @@ -114,7 +113,7 @@ class OpenFileCtx { return nonSequentialWriteInMemory; } - OpenFileCtx(FSDataOutputStream fos, Nfs3FileAttributes latestAttr, + OpenFileCtx(HdfsDataOutputStream fos, Nfs3FileAttributes latestAttr, String dumpFilePath) { this.fos = fos; this.latestAttr = latestAttr; @@ -438,7 +437,7 @@ class OpenFileCtx { FSDataInputStream fis = null; try { // Sync file data and length to avoid partial read failure - ((HdfsDataOutputStream) fos).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); + fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); fis = new FSDataInputStream(dfsClient.open(path)); readCount = fis.read(offset, readbuffer, 0, count); @@ -527,7 +526,7 @@ class OpenFileCtx { int ret = COMMIT_WAIT; try { // Sync file data and length - ((HdfsDataOutputStream) fos).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); + fos.hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); // Nothing to do for metadata since attr related change is pass-through ret = COMMIT_FINISHED; } catch (IOException e) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 8db8b1bb734..e96b537d1f2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -29,7 +29,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.FileUtil; @@ -629,7 +629,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return new 
CREATE3Response(Nfs3Status.NFS3ERR_INVAL); } - FSDataOutputStream fos = null; + HdfsDataOutputStream fos = null; String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); WccAttr preOpDirAttr = null; Nfs3FileAttributes postOpObjAttr = null; @@ -652,7 +652,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { EnumSet flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ? EnumSet .of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet .of(CreateFlag.CREATE); - fos = new FSDataOutputStream(dfsClient.create(fileIdPath, permission, + + fos = new HdfsDataOutputStream(dfsClient.create(fileIdPath, permission, flag, false, replication, blockSize, null, bufferSize, null), statistics); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 242b2b256a4..7d8dc365f86 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -296,6 +296,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5107 Fix array copy error in Readdir and Readdirplus responses (brandonli) + HDFS-5110 Change FSDataOutputStream to HdfsDataOutputStream for opened + streams to fix type cast error. (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 8f343e684c4672212ad206c412603e2a5b0ee733 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 19 Aug 2013 22:04:18 +0000 Subject: [PATCH 033/153] YARN-1006. Fixed broken rendering in the Nodes list web page on the RM web UI. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515629 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../yarn/server/resourcemanager/webapp/NodesPage.java | 6 +++--- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index dc178d7143a..6101901f12a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -77,6 +77,9 @@ Release 2.1.1-beta - UNRELEASED YARN-643. Fixed ResourceManager to remove all tokens consistently on app finish. (Xuan Gong via vinodkv) + YARN-1006. Fixed broken rendering in the Nodes list web page on the RM web + UI. 
(Xuan Gong via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java index 493fbad2cf6..87720e05960 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/NodesPage.java @@ -155,10 +155,10 @@ class NodesPage extends RmView { private String nodesTableInit() { StringBuilder b = tableInit().append(", aoColumnDefs: ["); - b.append("{'bSearchable': false, 'aTargets': [ 7 ]}"); + b.append("{'bSearchable': false, 'aTargets': [ 6 ]}"); b.append(", {'sType': 'title-numeric', 'bSearchable': false, " + - "'aTargets': [ 8, 9 ] }"); - b.append(", {'sType': 'title-numeric', 'aTargets': [ 5 ]}"); + "'aTargets': [ 7, 8 ] }"); + b.append(", {'sType': 'title-numeric', 'aTargets': [ 4 ]}"); b.append("]}"); return b.toString(); } From 23abbd8f649150d1c73834aea36de8ed53b3023c Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Mon, 19 Aug 2013 22:52:29 +0000 Subject: [PATCH 034/153] HADOOP-9860. Remove class HackedKeytab and HackedKeytabEncoder from hadoop-minikdc once jira DIRSERVER-1882 solved. (ywskycn via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515652 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 + hadoop-common-project/hadoop-minikdc/pom.xml | 2 +- .../kerberos/shared/keytab/HackedKeytab.java | 42 ------ .../shared/keytab/HackedKeytabEncoder.java | 121 ------------------ .../org/apache/hadoop/minikdc/MiniKdc.java | 6 +- .../apache/hadoop/minikdc/TestMiniKdc.java | 4 +- 6 files changed, 9 insertions(+), 169 deletions(-) delete mode 100644 hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java delete mode 100644 hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 92d0df40f5d..db23e0640df 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -316,6 +316,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9848. Create a MiniKDC for use with security testing. (ywskycn via tucu) + HADOOP-9860. Remove class HackedKeytab and HackedKeytabEncoder from + hadoop-minikdc once jira DIRSERVER-1882 solved. (ywskycn via tucu) + OPTIMIZATIONS HADOOP-9748. 
Reduce blocking on UGI.ensureInitialized (daryn) diff --git a/hadoop-common-project/hadoop-minikdc/pom.xml b/hadoop-common-project/hadoop-minikdc/pom.xml index 554e4a6d17e..f3e663c2b96 100644 --- a/hadoop-common-project/hadoop-minikdc/pom.xml +++ b/hadoop-common-project/hadoop-minikdc/pom.xml @@ -38,7 +38,7 @@ org.apache.directory.server apacheds-all - 2.0.0-M14 + 2.0.0-M15 compile diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java deleted file mode 100644 index cf4680a1fa1..00000000000 --- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java +++ /dev/null @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.directory.server.kerberos.shared.keytab; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; - -//This is a hack for ApacheDS 2.0.0-M14 to be able to create -//keytab files with more than one principal. -//It needs to be in this package because the KeytabEncoder class is package -// private. -//This class can be removed once jira DIRSERVER-1882 -// (https://issues.apache.org/jira/browse/DIRSERVER-1882) solved -public class HackedKeytab extends Keytab { - - private byte[] keytabVersion = VERSION_52; - - public void write( File file, int principalCount ) throws IOException - { - HackedKeytabEncoder writer = new HackedKeytabEncoder(); - ByteBuffer buffer = writer.write( keytabVersion, getEntries(), - principalCount ); - writeFile( buffer, file ); - } - -} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java deleted file mode 100644 index 0e04d155f7a..00000000000 --- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.directory.server.kerberos.shared.keytab; - -import org.apache.directory.shared.kerberos.components.EncryptionKey; - -import java.nio.ByteBuffer; -import java.util.Iterator; -import java.util.List; - -//This is a hack for ApacheDS 2.0.0-M14 to be able to create -//keytab files with more than one principal. -//It needs to be in this package because the KeytabEncoder class is package -// private. -//This class can be removed once jira DIRSERVER-1882 -// (https://issues.apache.org/jira/browse/DIRSERVER-1882) solved -class HackedKeytabEncoder extends KeytabEncoder { - - ByteBuffer write( byte[] keytabVersion, List entries, - int principalCount ) - { - ByteBuffer buffer = ByteBuffer.allocate( 512 * principalCount); - putKeytabVersion(buffer, keytabVersion); - putKeytabEntries( buffer, entries ); - buffer.flip(); - return buffer; - } - - private void putKeytabVersion( ByteBuffer buffer, byte[] version ) - { - buffer.put( version ); - } - - private void putKeytabEntries( ByteBuffer buffer, List entries ) - { - Iterator iterator = entries.iterator(); - - while ( iterator.hasNext() ) - { - ByteBuffer entryBuffer = putKeytabEntry( iterator.next() ); - int size = entryBuffer.position(); - - entryBuffer.flip(); - - buffer.putInt( size ); - buffer.put( entryBuffer ); - } - } - - private ByteBuffer putKeytabEntry( KeytabEntry entry ) - { - ByteBuffer buffer = ByteBuffer.allocate( 100 ); - - putPrincipalName( buffer, entry.getPrincipalName() ); - - buffer.putInt( ( int ) entry.getPrincipalType() ); - - buffer.putInt( ( int ) ( entry.getTimeStamp().getTime() / 1000 ) ); - - buffer.put( entry.getKeyVersion() ); - - putKeyBlock( buffer, entry.getKey() ); - - return buffer; - } - - private void putPrincipalName( ByteBuffer buffer, String principalName ) - { - String[] split = principalName.split("@"); - String nameComponent = split[0]; - String realm = split[1]; - - String[] nameComponents = nameComponent.split( "/" ); - - // increment for v1 - buffer.putShort( ( short ) nameComponents.length ); - - putCountedString( buffer, realm ); - // write components - - for ( int ii = 0; ii < nameComponents.length; ii++ ) - { - putCountedString( buffer, nameComponents[ii] ); - } - } - - private void putKeyBlock( ByteBuffer buffer, EncryptionKey key ) - { - buffer.putShort( ( short ) key.getKeyType().getValue() ); - putCountedBytes( buffer, key.getKeyValue() ); - } - - private void putCountedString( ByteBuffer buffer, String string ) - { - byte[] data = string.getBytes(); - buffer.putShort( ( short ) data.length ); - buffer.put( data ); - } - - private void putCountedBytes( ByteBuffer buffer, byte[] data ) - { - buffer.putShort( ( short ) data.length ); - buffer.put( data ); - } - -} \ No newline at end of file diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java index d328cd31ed3..c8aa78a9f34 100644 --- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java +++ 
b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java @@ -37,7 +37,7 @@ import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition; import org.apache.directory.server.core.partition.ldif.LdifPartition; import org.apache.directory.server.kerberos.kdc.KdcServer; import org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory; -import org.apache.directory.server.kerberos.shared.keytab.HackedKeytab; +import org.apache.directory.server.kerberos.shared.keytab.Keytab; import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry; import org.apache.directory.server.protocol.shared.transport.TcpTransport; import org.apache.directory.server.protocol.shared.transport.UdpTransport; @@ -514,7 +514,7 @@ public class MiniKdc { public void createPrincipal(File keytabFile, String ... principals) throws Exception { String generatedPassword = UUID.randomUUID().toString(); - HackedKeytab keytab = new HackedKeytab(); + Keytab keytab = new Keytab(); List entries = new ArrayList(); for (String principal : principals) { createPrincipal(principal, generatedPassword); @@ -529,6 +529,6 @@ public class MiniKdc { } } keytab.setEntries(entries); - keytab.write(keytabFile, principals.length); + keytab.write(keytabFile); } } \ No newline at end of file diff --git a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java index ff41519ae49..c1fc56daecd 100644 --- a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java +++ b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java @@ -137,7 +137,7 @@ public class TestMiniKdc extends KerberosSecurityTestcase { subject.getPrincipals().iterator().next().getClass()); Assert.assertEquals(principal + "@" + kdc.getRealm(), subject.getPrincipals().iterator().next().getName()); - loginContext.login(); + loginContext.logout(); //server login subject = new Subject(false, principals, new HashSet(), @@ -151,7 +151,7 @@ public class TestMiniKdc extends KerberosSecurityTestcase { subject.getPrincipals().iterator().next().getClass()); Assert.assertEquals(principal + "@" + kdc.getRealm(), subject.getPrincipals().iterator().next().getName()); - loginContext.login(); + loginContext.logout(); } finally { if (loginContext != null) { From ffdedf6b8be667ae5f71a79abde683c56db4326a Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Mon, 19 Aug 2013 23:02:24 +0000 Subject: [PATCH 035/153] HADOOP-9866. convert hadoop-auth testcases requiring kerberos to use minikdc. 
(ywskycn via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515657 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-auth/pom.xml | 61 +--------- .../authentication/KerberosTestUtils.java | 23 ++-- .../client/AuthenticatorTestCase.java | 26 ++-- .../client/TestAuthenticatedURL.java | 47 ++++---- .../client/TestKerberosAuthenticator.java | 60 +++++++--- .../client/TestPseudoAuthenticator.java | 61 ++++++---- .../TestAltKerberosAuthenticationHandler.java | 17 ++- .../server/TestAuthenticationFilter.java | 113 ++++++++++-------- .../server/TestAuthenticationToken.java | 72 ++++++----- .../TestKerberosAuthenticationHandler.java | 76 +++++++----- .../TestPseudoAuthenticationHandler.java | 29 +++-- .../authentication/util/TestKerberosName.java | 19 ++- .../authentication/util/TestKerberosUtil.java | 13 +- .../authentication/util/TestSigner.java | 36 +++--- .../hadoop-auth/src/test/resources/krb5.conf | 28 ----- .../hadoop-common/CHANGES.txt | 3 + 16 files changed, 356 insertions(+), 328 deletions(-) delete mode 100644 hadoop-common-project/hadoop-auth/src/test/resources/krb5.conf diff --git a/hadoop-common-project/hadoop-auth/pom.xml b/hadoop-common-project/hadoop-auth/pom.xml index 9819b3fe084..e2beb0d49e7 100644 --- a/hadoop-common-project/hadoop-auth/pom.xml +++ b/hadoop-common-project/hadoop-auth/pom.xml @@ -33,7 +33,6 @@ yyyyMMdd - LOCALHOST @@ -83,38 +82,15 @@ slf4j-log4j12 runtime + + org.apache.hadoop + hadoop-minikdc + test + - - - ${basedir}/src/test/resources - true - - krb5.conf - - - - - org.apache.maven.plugins - maven-surefire-plugin - - always - 600 - - ${project.build.directory}/test-classes/krb5.conf - ${kerberos.realm} - - - **/${test.exclude}.java - ${test.exclude.pattern} - **/TestKerberosAuth*.java - **/TestAltKerberosAuth*.java - **/Test*$*.java - - - org.apache.maven.plugins maven-source-plugin @@ -134,33 +110,6 @@ - - testKerberos - - false - - - - - org.apache.maven.plugins - maven-surefire-plugin - - always - 600 - - ${project.build.directory}/test-classes/krb5.conf - ${kerberos.realm} - - - **/${test.exclude}.java - ${test.exclude.pattern} - **/Test*$*.java - - - - - - docs diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java index ea0f17f04cf..7629a302791 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java @@ -13,7 +13,6 @@ */ package org.apache.hadoop.security.authentication; - import javax.security.auth.Subject; import javax.security.auth.kerberos.KerberosPrincipal; import javax.security.auth.login.AppConfigurationEntry; @@ -26,6 +25,7 @@ import java.io.File; import java.security.Principal; import java.security.PrivilegedActionException; import java.security.PrivilegedExceptionAction; +import java.util.UUID; import java.util.HashMap; import java.util.HashSet; import java.util.Map; @@ -36,32 +36,23 @@ import java.util.concurrent.Callable; * Test helper class for Java Kerberos setup. 
*/ public class KerberosTestUtils { - private static final String PREFIX = "hadoop-auth.test."; - - public static final String REALM = PREFIX + "kerberos.realm"; - - public static final String CLIENT_PRINCIPAL = PREFIX + "kerberos.client.principal"; - - public static final String SERVER_PRINCIPAL = PREFIX + "kerberos.server.principal"; - - public static final String KEYTAB_FILE = PREFIX + "kerberos.keytab.file"; + private static String keytabFile = new File(System.getProperty("test.dir", "target"), + UUID.randomUUID().toString()).toString(); public static String getRealm() { - return System.getProperty(REALM, "LOCALHOST"); + return "EXAMPLE.COM"; } public static String getClientPrincipal() { - return System.getProperty(CLIENT_PRINCIPAL, "client") + "@" + getRealm(); + return "client@EXAMPLE.COM"; } public static String getServerPrincipal() { - return System.getProperty(SERVER_PRINCIPAL, "HTTP/localhost") + "@" + getRealm(); + return "HTTP/localhost@EXAMPLE.COM"; } public static String getKeytabFile() { - String keytabFile = - new File(System.getProperty("user.home"), System.getProperty("user.name") + ".keytab").toString(); - return System.getProperty(KEYTAB_FILE, keytabFile); + return keytabFile; } private static class KerberosConfiguration extends Configuration { diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java index 6059d8caf83..ba7b43343d6 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/AuthenticatorTestCase.java @@ -2,9 +2,9 @@ * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * + * + * http://www.apache.org/licenses/LICENSE-2.0 + * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -13,10 +13,7 @@ */ package org.apache.hadoop.security.authentication.client; -import junit.framework.Assert; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; -import junit.framework.TestCase; -import org.mockito.Mockito; import org.mortbay.jetty.Server; import org.mortbay.jetty.servlet.Context; import org.mortbay.jetty.servlet.FilterHolder; @@ -27,19 +24,20 @@ import javax.servlet.ServletException; import javax.servlet.http.HttpServlet; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; -import java.io.BufferedReader; import java.io.IOException; import java.io.InputStream; -import java.io.InputStreamReader; import java.io.OutputStream; import java.io.OutputStreamWriter; +import java.io.BufferedReader; +import java.io.InputStreamReader; import java.io.Writer; import java.net.HttpURLConnection; import java.net.ServerSocket; import java.net.URL; import java.util.Properties; +import org.junit.Assert; -public abstract class AuthenticatorTestCase extends TestCase { +public class AuthenticatorTestCase { private Server server; private String host = null; private int port = -1; @@ -151,18 +149,18 @@ public abstract class AuthenticatorTestCase extends TestCase { writer.write(POST); writer.close(); } - assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); if (doPost) { BufferedReader reader = new BufferedReader(new InputStreamReader(conn.getInputStream())); String echo = reader.readLine(); - assertEquals(POST, echo); - assertNull(reader.readLine()); + Assert.assertEquals(POST, echo); + Assert.assertNull(reader.readLine()); } aUrl = new AuthenticatedURL(); conn = aUrl.openConnection(url, token); conn.connect(); - assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); - assertEquals(tokenStr, token.toString()); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + Assert.assertEquals(tokenStr, token.toString()); } finally { stop(); } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java index 02ab92fac97..5be0b382f2f 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestAuthenticatedURL.java @@ -13,8 +13,8 @@ */ package org.apache.hadoop.security.authentication.client; -import junit.framework.Assert; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; import org.mockito.Mockito; import java.net.HttpURLConnection; @@ -24,46 +24,48 @@ import java.util.HashMap; import java.util.List; import java.util.Map; -public class TestAuthenticatedURL extends TestCase { +public class TestAuthenticatedURL { + @Test public void testToken() throws Exception { AuthenticatedURL.Token token = new AuthenticatedURL.Token(); - assertFalse(token.isSet()); + Assert.assertFalse(token.isSet()); token = new AuthenticatedURL.Token("foo"); - assertTrue(token.isSet()); - assertEquals("foo", token.toString()); + Assert.assertTrue(token.isSet()); + Assert.assertEquals("foo", token.toString()); AuthenticatedURL.Token token1 = new AuthenticatedURL.Token(); AuthenticatedURL.Token token2 = new AuthenticatedURL.Token(); - 
assertEquals(token1.hashCode(), token2.hashCode()); - assertTrue(token1.equals(token2)); + Assert.assertEquals(token1.hashCode(), token2.hashCode()); + Assert.assertTrue(token1.equals(token2)); token1 = new AuthenticatedURL.Token(); token2 = new AuthenticatedURL.Token("foo"); - assertNotSame(token1.hashCode(), token2.hashCode()); - assertFalse(token1.equals(token2)); + Assert.assertNotSame(token1.hashCode(), token2.hashCode()); + Assert.assertFalse(token1.equals(token2)); token1 = new AuthenticatedURL.Token("foo"); token2 = new AuthenticatedURL.Token(); - assertNotSame(token1.hashCode(), token2.hashCode()); - assertFalse(token1.equals(token2)); + Assert.assertNotSame(token1.hashCode(), token2.hashCode()); + Assert.assertFalse(token1.equals(token2)); token1 = new AuthenticatedURL.Token("foo"); token2 = new AuthenticatedURL.Token("foo"); - assertEquals(token1.hashCode(), token2.hashCode()); - assertTrue(token1.equals(token2)); + Assert.assertEquals(token1.hashCode(), token2.hashCode()); + Assert.assertTrue(token1.equals(token2)); token1 = new AuthenticatedURL.Token("bar"); token2 = new AuthenticatedURL.Token("foo"); - assertNotSame(token1.hashCode(), token2.hashCode()); - assertFalse(token1.equals(token2)); + Assert.assertNotSame(token1.hashCode(), token2.hashCode()); + Assert.assertFalse(token1.equals(token2)); token1 = new AuthenticatedURL.Token("foo"); token2 = new AuthenticatedURL.Token("bar"); - assertNotSame(token1.hashCode(), token2.hashCode()); - assertFalse(token1.equals(token2)); + Assert.assertNotSame(token1.hashCode(), token2.hashCode()); + Assert.assertFalse(token1.equals(token2)); } + @Test public void testInjectToken() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); AuthenticatedURL.Token token = new AuthenticatedURL.Token(); @@ -72,6 +74,7 @@ public class TestAuthenticatedURL extends TestCase { Mockito.verify(conn).addRequestProperty(Mockito.eq("Cookie"), Mockito.anyString()); } + @Test public void testExtractTokenOK() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); @@ -87,9 +90,10 @@ public class TestAuthenticatedURL extends TestCase { AuthenticatedURL.Token token = new AuthenticatedURL.Token(); AuthenticatedURL.extractToken(conn, token); - assertEquals(tokenStr, token.toString()); + Assert.assertEquals(tokenStr, token.toString()); } + @Test public void testExtractTokenFail() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); @@ -106,15 +110,16 @@ public class TestAuthenticatedURL extends TestCase { token.set("bar"); try { AuthenticatedURL.extractToken(conn, token); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected Assert.assertFalse(token.isSet()); } catch (Exception ex) { - fail(); + Assert.fail(); } } + @Test public void testConnectionConfigurator() throws Exception { HttpURLConnection conn = Mockito.mock(HttpURLConnection.class); Mockito.when(conn.getResponseCode()). 
diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java index 93d1d027a29..fd4b57258b9 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java @@ -13,17 +13,33 @@ */ package org.apache.hadoop.security.authentication.client; +import org.apache.hadoop.minikdc.KerberosSecurityTestcase; import org.apache.hadoop.security.authentication.KerberosTestUtils; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import java.io.File; import java.net.HttpURLConnection; import java.net.URL; import java.util.Properties; import java.util.concurrent.Callable; -public class TestKerberosAuthenticator extends AuthenticatorTestCase { +public class TestKerberosAuthenticator extends KerberosSecurityTestcase { + + @Before + public void setup() throws Exception { + // create keytab + File keytabFile = new File(KerberosTestUtils.getKeytabFile()); + String clientPrincipal = KerberosTestUtils.getClientPrincipal(); + String serverPrincipal = KerberosTestUtils.getServerPrincipal(); + clientPrincipal = clientPrincipal.substring(0, clientPrincipal.lastIndexOf("@")); + serverPrincipal = serverPrincipal.substring(0, serverPrincipal.lastIndexOf("@")); + getKdc().createPrincipal(keytabFile, clientPrincipal, serverPrincipal); + } private Properties getAuthenticationHandlerConfiguration() { Properties props = new Properties(); @@ -35,57 +51,67 @@ public class TestKerberosAuthenticator extends AuthenticatorTestCase { return props; } + @Test(timeout=60000) public void testFallbacktoPseudoAuthenticator() throws Exception { + AuthenticatorTestCase auth = new AuthenticatorTestCase(); Properties props = new Properties(); props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple"); props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false"); - setAuthenticationHandlerConfig(props); - _testAuthentication(new KerberosAuthenticator(), false); + auth.setAuthenticationHandlerConfig(props); + auth._testAuthentication(new KerberosAuthenticator(), false); } + @Test(timeout=60000) public void testFallbacktoPseudoAuthenticatorAnonymous() throws Exception { + AuthenticatorTestCase auth = new AuthenticatorTestCase(); Properties props = new Properties(); props.setProperty(AuthenticationFilter.AUTH_TYPE, "simple"); props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true"); - setAuthenticationHandlerConfig(props); - _testAuthentication(new KerberosAuthenticator(), false); + auth.setAuthenticationHandlerConfig(props); + auth._testAuthentication(new KerberosAuthenticator(), false); } + @Test(timeout=60000) public void testNotAuthenticated() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); - start(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); + auth.start(); try { - URL url = new 
URL(getBaseURL()); + URL url = new URL(auth.getBaseURL()); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.connect(); - assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode()); - assertTrue(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE) != null); + Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode()); + Assert.assertTrue(conn.getHeaderField(KerberosAuthenticator.WWW_AUTHENTICATE) != null); } finally { - stop(); + auth.stop(); } } - + @Test(timeout=60000) public void testAuthentication() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); + final AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration()); KerberosTestUtils.doAsClient(new Callable() { @Override public Void call() throws Exception { - _testAuthentication(new KerberosAuthenticator(), false); + auth._testAuthentication(new KerberosAuthenticator(), false); return null; } }); } + @Test(timeout=60000) public void testAuthenticationPost() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration()); + final AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration()); KerberosTestUtils.doAsClient(new Callable() { @Override public Void call() throws Exception { - _testAuthentication(new KerberosAuthenticator(), true); + auth._testAuthentication(new KerberosAuthenticator(), true); return null; } }); } - } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java index 807052e8484..20ec587ac8f 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestPseudoAuthenticator.java @@ -15,12 +15,14 @@ package org.apache.hadoop.security.authentication.client; import org.apache.hadoop.security.authentication.server.AuthenticationFilter; import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler; +import org.junit.Assert; +import org.junit.Test; import java.net.HttpURLConnection; import java.net.URL; import java.util.Properties; -public class TestPseudoAuthenticator extends AuthenticatorTestCase { +public class TestPseudoAuthenticator { private Properties getAuthenticationHandlerConfiguration(boolean anonymousAllowed) { Properties props = new Properties(); @@ -29,55 +31,74 @@ public class TestPseudoAuthenticator extends AuthenticatorTestCase { return props; } + @Test public void testGetUserName() throws Exception { PseudoAuthenticator authenticator = new PseudoAuthenticator(); - assertEquals(System.getProperty("user.name"), authenticator.getUserName()); + Assert.assertEquals(System.getProperty("user.name"), authenticator.getUserName()); } + @Test public void testAnonymousAllowed() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true)); - start(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(true)); + auth.start(); try { - URL url = new URL(getBaseURL()); + URL url = new 
URL(auth.getBaseURL()); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.connect(); - assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); } finally { - stop(); + auth.stop(); } } + @Test public void testAnonymousDisallowed() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false)); - start(); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(false)); + auth.start(); try { - URL url = new URL(getBaseURL()); + URL url = new URL(auth.getBaseURL()); HttpURLConnection conn = (HttpURLConnection) url.openConnection(); conn.connect(); - assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode()); + Assert.assertEquals(HttpURLConnection.HTTP_UNAUTHORIZED, conn.getResponseCode()); } finally { - stop(); + auth.stop(); } } + @Test public void testAuthenticationAnonymousAllowed() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true)); - _testAuthentication(new PseudoAuthenticator(), false); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(true)); + auth._testAuthentication(new PseudoAuthenticator(), false); } + @Test public void testAuthenticationAnonymousDisallowed() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false)); - _testAuthentication(new PseudoAuthenticator(), false); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(false)); + auth._testAuthentication(new PseudoAuthenticator(), false); } + @Test public void testAuthenticationAnonymousAllowedWithPost() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(true)); - _testAuthentication(new PseudoAuthenticator(), true); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(true)); + auth._testAuthentication(new PseudoAuthenticator(), true); } + @Test public void testAuthenticationAnonymousDisallowedWithPost() throws Exception { - setAuthenticationHandlerConfig(getAuthenticationHandlerConfiguration(false)); - _testAuthentication(new PseudoAuthenticator(), true); + AuthenticatorTestCase auth = new AuthenticatorTestCase(); + auth.setAuthenticationHandlerConfig( + getAuthenticationHandlerConfiguration(false)); + auth._testAuthentication(new PseudoAuthenticator(), true); } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java index c2d43ebb3ca..3b838033090 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java @@ -18,6 +18,8 @@ import java.util.Properties; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import org.apache.hadoop.security.authentication.client.AuthenticationException; +import 
org.junit.Assert; +import org.junit.Test; import org.mockito.Mockito; public class TestAltKerberosAuthenticationHandler @@ -45,6 +47,7 @@ public class TestAltKerberosAuthenticationHandler return AltKerberosAuthenticationHandler.TYPE; } + @Test(timeout=60000) public void testAlternateAuthenticationAsBrowser() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); @@ -54,11 +57,12 @@ public class TestAltKerberosAuthenticationHandler Mockito.when(request.getHeader("User-Agent")).thenReturn("Some Browser"); AuthenticationToken token = handler.authenticate(request, response); - assertEquals("A", token.getUserName()); - assertEquals("B", token.getName()); - assertEquals(getExpectedType(), token.getType()); + Assert.assertEquals("A", token.getUserName()); + Assert.assertEquals("B", token.getName()); + Assert.assertEquals(getExpectedType(), token.getType()); } + @Test(timeout=60000) public void testNonDefaultNonBrowserUserAgentAsBrowser() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); @@ -81,11 +85,12 @@ public class TestAltKerberosAuthenticationHandler Mockito.when(request.getHeader("User-Agent")).thenReturn("blah"); // Should use alt authentication AuthenticationToken token = handler.authenticate(request, response); - assertEquals("A", token.getUserName()); - assertEquals("B", token.getName()); - assertEquals(getExpectedType(), token.getType()); + Assert.assertEquals("A", token.getUserName()); + Assert.assertEquals("B", token.getName()); + Assert.assertEquals(getExpectedType(), token.getType()); } + @Test(timeout=60000) public void testNonDefaultNonBrowserUserAgentAsNonBrowser() throws Exception { if (handler != null) { handler.destroy(); diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java index 1c31e54ba52..6820151210c 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java @@ -16,7 +16,8 @@ package org.apache.hadoop.security.authentication.server; import org.apache.hadoop.security.authentication.client.AuthenticatedURL; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.util.Signer; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; import org.mockito.Mockito; import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; @@ -34,8 +35,9 @@ import java.util.Arrays; import java.util.Properties; import java.util.Vector; -public class TestAuthenticationFilter extends TestCase { +public class TestAuthenticationFilter { + @Test public void testGetConfiguration() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); FilterConfig config = Mockito.mock(FilterConfig.class); @@ -43,27 +45,28 @@ public class TestAuthenticationFilter extends TestCase { Mockito.when(config.getInitParameter("a")).thenReturn("A"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("a")).elements()); 
Properties props = filter.getConfiguration("", config); - assertEquals("A", props.getProperty("a")); + Assert.assertEquals("A", props.getProperty("a")); config = Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo"); Mockito.when(config.getInitParameter("foo.a")).thenReturn("A"); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("foo.a")).elements()); props = filter.getConfiguration("foo.", config); - assertEquals("A", props.getProperty("a")); + Assert.assertEquals("A", props.getProperty("a")); } + @Test public void testInitEmpty() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { FilterConfig config = Mockito.mock(FilterConfig.class); Mockito.when(config.getInitParameterNames()).thenReturn(new Vector().elements()); filter.init(config); - fail(); + Assert.fail(); } catch (ServletException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } finally { filter.destroy(); } @@ -126,6 +129,7 @@ public class TestAuthenticationFilter extends TestCase { } } + @Test public void testInit() throws Exception { // minimal configuration & simple auth handler (Pseudo) @@ -138,11 +142,11 @@ public class TestAuthenticationFilter extends TestCase { new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE, AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements()); filter.init(config); - assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass()); - assertTrue(filter.isRandomSecret()); - assertNull(filter.getCookieDomain()); - assertNull(filter.getCookiePath()); - assertEquals(1000, filter.getValidity()); + Assert.assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass()); + Assert.assertTrue(filter.isRandomSecret()); + Assert.assertNull(filter.getCookieDomain()); + Assert.assertNull(filter.getCookiePath()); + Assert.assertEquals(1000, filter.getValidity()); } finally { filter.destroy(); } @@ -157,7 +161,7 @@ public class TestAuthenticationFilter extends TestCase { new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE, AuthenticationFilter.SIGNATURE_SECRET)).elements()); filter.init(config); - assertFalse(filter.isRandomSecret()); + Assert.assertFalse(filter.isRandomSecret()); } finally { filter.destroy(); } @@ -174,13 +178,12 @@ public class TestAuthenticationFilter extends TestCase { AuthenticationFilter.COOKIE_DOMAIN, AuthenticationFilter.COOKIE_PATH)).elements()); filter.init(config); - assertEquals(".foo.com", filter.getCookieDomain()); - assertEquals("/bar", filter.getCookiePath()); + Assert.assertEquals(".foo.com", filter.getCookieDomain()); + Assert.assertEquals("/bar", filter.getCookiePath()); } finally { filter.destroy(); } - // authentication handler lifecycle, and custom impl DummyAuthenticationHandler.reset(); filter = new AuthenticationFilter(); @@ -195,10 +198,10 @@ public class TestAuthenticationFilter extends TestCase { Arrays.asList(AuthenticationFilter.AUTH_TYPE, "management.operation.return")).elements()); filter.init(config); - assertTrue(DummyAuthenticationHandler.init); + Assert.assertTrue(DummyAuthenticationHandler.init); } finally { filter.destroy(); - assertTrue(DummyAuthenticationHandler.destroy); + Assert.assertTrue(DummyAuthenticationHandler.destroy); } // kerberos auth handler @@ -212,11 +215,12 @@ public class TestAuthenticationFilter extends TestCase { } catch (ServletException ex) { // Expected } finally { - assertEquals(KerberosAuthenticationHandler.class, 
filter.getAuthenticationHandler().getClass()); + Assert.assertEquals(KerberosAuthenticationHandler.class, filter.getAuthenticationHandler().getClass()); filter.destroy(); } } + @Test public void testGetRequestURL() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -235,12 +239,13 @@ public class TestAuthenticationFilter extends TestCase { Mockito.when(request.getRequestURL()).thenReturn(new StringBuffer("http://foo:8080/bar")); Mockito.when(request.getQueryString()).thenReturn("a=A&b=B"); - assertEquals("http://foo:8080/bar?a=A&b=B", filter.getRequestURL(request)); + Assert.assertEquals("http://foo:8080/bar?a=A&b=B", filter.getRequestURL(request)); } finally { filter.destroy(); } } + @Test public void testGetToken() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -268,12 +273,13 @@ public class TestAuthenticationFilter extends TestCase { AuthenticationToken newToken = filter.getToken(request); - assertEquals(token.toString(), newToken.toString()); + Assert.assertEquals(token.toString(), newToken.toString()); } finally { filter.destroy(); } } + @Test public void testGetTokenExpired() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -300,17 +306,18 @@ public class TestAuthenticationFilter extends TestCase { try { filter.getToken(request); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } finally { filter.destroy(); } } + @Test public void testGetTokenInvalidType() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -338,17 +345,18 @@ public class TestAuthenticationFilter extends TestCase { try { filter.getToken(request); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } finally { filter.destroy(); } } + @Test public void testDoFilterNotAuthenticated() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -374,7 +382,7 @@ public class TestAuthenticationFilter extends TestCase { new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { - fail(); + Assert.fail(); return null; } } @@ -468,27 +476,27 @@ public class TestAuthenticationFilter extends TestCase { Mockito.verify(response, Mockito.never()). 
addCookie(Mockito.any(Cookie.class)); } else { - assertNotNull(setCookie[0]); - assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); - assertTrue(setCookie[0].getValue().contains("u=")); - assertTrue(setCookie[0].getValue().contains("p=")); - assertTrue(setCookie[0].getValue().contains("t=")); - assertTrue(setCookie[0].getValue().contains("e=")); - assertTrue(setCookie[0].getValue().contains("s=")); - assertTrue(calledDoFilter[0]); + Assert.assertNotNull(setCookie[0]); + Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); + Assert.assertTrue(setCookie[0].getValue().contains("u=")); + Assert.assertTrue(setCookie[0].getValue().contains("p=")); + Assert.assertTrue(setCookie[0].getValue().contains("t=")); + Assert.assertTrue(setCookie[0].getValue().contains("e=")); + Assert.assertTrue(setCookie[0].getValue().contains("s=")); + Assert.assertTrue(calledDoFilter[0]); Signer signer = new Signer("secret".getBytes()); String value = signer.verifyAndExtract(setCookie[0].getValue()); AuthenticationToken token = AuthenticationToken.parse(value); - assertEquals(System.currentTimeMillis() + 1000 * 1000, + Assert.assertEquals(System.currentTimeMillis() + 1000 * 1000, token.getExpires(), 100); if (withDomainPath) { - assertEquals(".foo.com", setCookie[0].getDomain()); - assertEquals("/bar", setCookie[0].getPath()); + Assert.assertEquals(".foo.com", setCookie[0].getDomain()); + Assert.assertEquals("/bar", setCookie[0].getPath()); } else { - assertNull(setCookie[0].getDomain()); - assertNull(setCookie[0].getPath()); + Assert.assertNull(setCookie[0].getDomain()); + Assert.assertNull(setCookie[0].getPath()); } } } finally { @@ -496,22 +504,27 @@ public class TestAuthenticationFilter extends TestCase { } } + @Test public void testDoFilterAuthentication() throws Exception { _testDoFilterAuthentication(false, false, false); } + @Test public void testDoFilterAuthenticationImmediateExpiration() throws Exception { _testDoFilterAuthentication(false, false, true); } + @Test public void testDoFilterAuthenticationWithInvalidToken() throws Exception { _testDoFilterAuthentication(false, true, false); } + @Test public void testDoFilterAuthenticationWithDomainPath() throws Exception { _testDoFilterAuthentication(true, false, false); } + @Test public void testDoFilterAuthenticated() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -547,8 +560,8 @@ public class TestAuthenticationFilter extends TestCase { public Object answer(InvocationOnMock invocation) throws Throwable { Object[] args = invocation.getArguments(); HttpServletRequest request = (HttpServletRequest) args[0]; - assertEquals("u", request.getRemoteUser()); - assertEquals("p", request.getUserPrincipal().getName()); + Assert.assertEquals("u", request.getRemoteUser()); + Assert.assertEquals("p", request.getUserPrincipal().getName()); return null; } } @@ -561,6 +574,7 @@ public class TestAuthenticationFilter extends TestCase { } } + @Test public void testDoFilterAuthenticatedExpired() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -594,7 +608,7 @@ public class TestAuthenticationFilter extends TestCase { new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { - fail(); + Assert.fail(); return null; } } @@ -616,15 +630,15 @@ public class TestAuthenticationFilter extends TestCase { Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString()); - assertNotNull(setCookie[0]); - 
assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); - assertEquals("", setCookie[0].getValue()); + Assert.assertNotNull(setCookie[0]); + Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); + Assert.assertEquals("", setCookie[0].getValue()); } finally { filter.destroy(); } } - + @Test public void testDoFilterAuthenticatedInvalidType() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { @@ -658,7 +672,7 @@ public class TestAuthenticationFilter extends TestCase { new Answer() { @Override public Object answer(InvocationOnMock invocation) throws Throwable { - fail(); + Assert.fail(); return null; } } @@ -680,14 +694,15 @@ public class TestAuthenticationFilter extends TestCase { Mockito.verify(response).sendError(Mockito.eq(HttpServletResponse.SC_UNAUTHORIZED), Mockito.anyString()); - assertNotNull(setCookie[0]); - assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); - assertEquals("", setCookie[0].getValue()); + Assert.assertNotNull(setCookie[0]); + Assert.assertEquals(AuthenticatedURL.AUTH_COOKIE, setCookie[0].getName()); + Assert.assertEquals("", setCookie[0].getValue()); } finally { filter.destroy(); } } + @Test public void testManagementOperation() throws Exception { AuthenticationFilter filter = new AuthenticationFilter(); try { diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java index bb5251f8339..c17c71033a7 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationToken.java @@ -14,98 +14,104 @@ package org.apache.hadoop.security.authentication.server; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; -public class TestAuthenticationToken extends TestCase { +public class TestAuthenticationToken { + @Test public void testAnonymous() { - assertNotNull(AuthenticationToken.ANONYMOUS); - assertEquals(null, AuthenticationToken.ANONYMOUS.getUserName()); - assertEquals(null, AuthenticationToken.ANONYMOUS.getName()); - assertEquals(null, AuthenticationToken.ANONYMOUS.getType()); - assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires()); - assertFalse(AuthenticationToken.ANONYMOUS.isExpired()); + Assert.assertNotNull(AuthenticationToken.ANONYMOUS); + Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getUserName()); + Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getName()); + Assert.assertEquals(null, AuthenticationToken.ANONYMOUS.getType()); + Assert.assertEquals(-1, AuthenticationToken.ANONYMOUS.getExpires()); + Assert.assertFalse(AuthenticationToken.ANONYMOUS.isExpired()); } + @Test public void testConstructor() throws Exception { try { new AuthenticationToken(null, "p", "t"); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("", "p", "t"); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("u", null, "t"); - fail(); + Assert.fail(); } catch 
(IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("u", "", "t"); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("u", "p", null); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { new AuthenticationToken("u", "p", ""); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } new AuthenticationToken("u", "p", "t"); } + @Test public void testGetters() throws Exception { long expires = System.currentTimeMillis() + 50; AuthenticationToken token = new AuthenticationToken("u", "p", "t"); token.setExpires(expires); - assertEquals("u", token.getUserName()); - assertEquals("p", token.getName()); - assertEquals("t", token.getType()); - assertEquals(expires, token.getExpires()); - assertFalse(token.isExpired()); + Assert.assertEquals("u", token.getUserName()); + Assert.assertEquals("p", token.getName()); + Assert.assertEquals("t", token.getType()); + Assert.assertEquals(expires, token.getExpires()); + Assert.assertFalse(token.isExpired()); Thread.sleep(70); // +20 msec fuzz for timer granularity. - assertTrue(token.isExpired()); + Assert.assertTrue(token.isExpired()); } + @Test public void testToStringAndParse() throws Exception { long expires = System.currentTimeMillis() + 50; AuthenticationToken token = new AuthenticationToken("u", "p", "t"); token.setExpires(expires); String str = token.toString(); token = AuthenticationToken.parse(str); - assertEquals("p", token.getName()); - assertEquals("t", token.getType()); - assertEquals(expires, token.getExpires()); - assertFalse(token.isExpired()); + Assert.assertEquals("p", token.getName()); + Assert.assertEquals("t", token.getType()); + Assert.assertEquals(expires, token.getExpires()); + Assert.assertFalse(token.isExpired()); Thread.sleep(70); // +20 msec fuzz for timer granularity. 
- assertTrue(token.isExpired()); + Assert.assertTrue(token.isExpired()); } + @Test public void testParseInvalid() throws Exception { long expires = System.currentTimeMillis() + 50; AuthenticationToken token = new AuthenticationToken("u", "p", "t"); @@ -114,11 +120,11 @@ public class TestAuthenticationToken extends TestCase { str = str.substring(0, str.indexOf("e=")); try { AuthenticationToken.parse(str); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java index d198e58431d..ab793b7c61d 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java @@ -13,25 +13,31 @@ */ package org.apache.hadoop.security.authentication.server; +import org.apache.hadoop.minikdc.KerberosSecurityTestcase; import org.apache.hadoop.security.authentication.KerberosTestUtils; import org.apache.hadoop.security.authentication.client.AuthenticationException; import org.apache.hadoop.security.authentication.client.KerberosAuthenticator; -import junit.framework.TestCase; import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.security.authentication.util.KerberosName; import org.apache.hadoop.security.authentication.util.KerberosUtil; import org.ietf.jgss.GSSContext; import org.ietf.jgss.GSSManager; import org.ietf.jgss.GSSName; +import org.junit.After; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; import org.mockito.Mockito; import org.ietf.jgss.Oid; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; +import java.io.File; import java.util.Properties; import java.util.concurrent.Callable; -public class TestKerberosAuthenticationHandler extends TestCase { +public class TestKerberosAuthenticationHandler + extends KerberosSecurityTestcase { protected KerberosAuthenticationHandler handler; @@ -54,9 +60,16 @@ public class TestKerberosAuthenticationHandler extends TestCase { return props; } - @Override - protected void setUp() throws Exception { - super.setUp(); + @Before + public void setup() throws Exception { + // create keytab + File keytabFile = new File(KerberosTestUtils.getKeytabFile()); + String clientPrincipal = KerberosTestUtils.getClientPrincipal(); + String serverPrincipal = KerberosTestUtils.getServerPrincipal(); + clientPrincipal = clientPrincipal.substring(0, clientPrincipal.lastIndexOf("@")); + serverPrincipal = serverPrincipal.substring(0, serverPrincipal.lastIndexOf("@")); + getKdc().createPrincipal(keytabFile, clientPrincipal, serverPrincipal); + // handler handler = getNewAuthenticationHandler(); Properties props = getDefaultProperties(); try { @@ -67,18 +80,10 @@ public class TestKerberosAuthenticationHandler extends TestCase { } } - @Override - protected void tearDown() throws Exception { - if (handler != null) { - handler.destroy(); - handler = null; - } - super.tearDown(); - } - + @Test(timeout=60000) public void testNameRules() throws Exception { KerberosName kn = new KerberosName(KerberosTestUtils.getServerPrincipal()); - 
assertEquals(KerberosTestUtils.getRealm(), kn.getRealm()); + Assert.assertEquals(KerberosTestUtils.getRealm(), kn.getRealm()); //destroy handler created in setUp() handler.destroy(); @@ -93,30 +98,32 @@ public class TestKerberosAuthenticationHandler extends TestCase { } catch (Exception ex) { } kn = new KerberosName("bar@BAR"); - assertEquals("bar", kn.getShortName()); + Assert.assertEquals("bar", kn.getShortName()); kn = new KerberosName("bar@FOO"); try { kn.getShortName(); - fail(); + Assert.fail(); } catch (Exception ex) { } } - + + @Test(timeout=60000) public void testInit() throws Exception { - assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal()); - assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab()); + Assert.assertEquals(KerberosTestUtils.getServerPrincipal(), handler.getPrincipal()); + Assert.assertEquals(KerberosTestUtils.getKeytabFile(), handler.getKeytab()); } + @Test(timeout=60000) public void testType() throws Exception { - assertEquals(getExpectedType(), handler.getType()); + Assert.assertEquals(getExpectedType(), handler.getType()); } public void testRequestWithoutAuthorization() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); - assertNull(handler.authenticate(request, response)); + Assert.assertNull(handler.authenticate(request, response)); Mockito.verify(response).setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE); Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED); } @@ -126,11 +133,12 @@ public class TestKerberosAuthenticationHandler extends TestCase { HttpServletResponse response = Mockito.mock(HttpServletResponse.class); Mockito.when(request.getHeader(KerberosAuthenticator.AUTHORIZATION)).thenReturn("invalid"); - assertNull(handler.authenticate(request, response)); + Assert.assertNull(handler.authenticate(request, response)); Mockito.verify(response).setHeader(KerberosAuthenticator.WWW_AUTHENTICATE, KerberosAuthenticator.NEGOTIATE); Mockito.verify(response).setStatus(HttpServletResponse.SC_UNAUTHORIZED); } + @Test(timeout=60000) public void testRequestWithIncompleteAuthorization() throws Exception { HttpServletRequest request = Mockito.mock(HttpServletRequest.class); HttpServletResponse response = Mockito.mock(HttpServletResponse.class); @@ -139,15 +147,14 @@ public class TestKerberosAuthenticationHandler extends TestCase { .thenReturn(KerberosAuthenticator.NEGOTIATE); try { handler.authenticate(request, response); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } - public void testRequestWithAuthorization() throws Exception { String token = KerberosTestUtils.doAsClient(new Callable() { @Override @@ -191,9 +198,9 @@ public class TestKerberosAuthenticationHandler extends TestCase { Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*")); Mockito.verify(response).setStatus(HttpServletResponse.SC_OK); - assertEquals(KerberosTestUtils.getClientPrincipal(), authToken.getName()); - assertTrue(KerberosTestUtils.getClientPrincipal().startsWith(authToken.getUserName())); - assertEquals(getExpectedType(), authToken.getType()); + Assert.assertEquals(KerberosTestUtils.getClientPrincipal(), authToken.getName()); + Assert.assertTrue(KerberosTestUtils.getClientPrincipal().startsWith(authToken.getUserName())); + Assert.assertEquals(getExpectedType(), authToken.getType()); } else { 
Mockito.verify(response).setHeader(Mockito.eq(KerberosAuthenticator.WWW_AUTHENTICATE), Mockito.matches(KerberosAuthenticator.NEGOTIATE + " .*")); @@ -213,12 +220,19 @@ public class TestKerberosAuthenticationHandler extends TestCase { try { handler.authenticate(request, response); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } } + @After + public void tearDown() throws Exception { + if (handler != null) { + handler.destroy(); + handler = null; + } + } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java index dbc2c368336..da7eda7bc8e 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestPseudoAuthenticationHandler.java @@ -14,33 +14,37 @@ package org.apache.hadoop.security.authentication.server; import org.apache.hadoop.security.authentication.client.AuthenticationException; -import junit.framework.TestCase; import org.apache.hadoop.security.authentication.client.PseudoAuthenticator; +import org.junit.Assert; +import org.junit.Test; import org.mockito.Mockito; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import java.util.Properties; -public class TestPseudoAuthenticationHandler extends TestCase { +public class TestPseudoAuthenticationHandler { + @Test public void testInit() throws Exception { PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler(); try { Properties props = new Properties(); props.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "false"); handler.init(props); - assertEquals(false, handler.getAcceptAnonymous()); + Assert.assertEquals(false, handler.getAcceptAnonymous()); } finally { handler.destroy(); } } + @Test public void testType() throws Exception { PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler(); - assertEquals(PseudoAuthenticationHandler.TYPE, handler.getType()); + Assert.assertEquals(PseudoAuthenticationHandler.TYPE, handler.getType()); } + @Test public void testAnonymousOn() throws Exception { PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler(); try { @@ -53,12 +57,13 @@ public class TestPseudoAuthenticationHandler extends TestCase { AuthenticationToken token = handler.authenticate(request, response); - assertEquals(AuthenticationToken.ANONYMOUS, token); + Assert.assertEquals(AuthenticationToken.ANONYMOUS, token); } finally { handler.destroy(); } } + @Test public void testAnonymousOff() throws Exception { PseudoAuthenticationHandler handler = new PseudoAuthenticationHandler(); try { @@ -70,11 +75,11 @@ public class TestPseudoAuthenticationHandler extends TestCase { HttpServletResponse response = Mockito.mock(HttpServletResponse.class); handler.authenticate(request, response); - fail(); + Assert.fail(); } catch (AuthenticationException ex) { // Expected } catch (Exception ex) { - fail(); + Assert.fail(); } finally { handler.destroy(); } @@ -93,19 +98,21 @@ public class TestPseudoAuthenticationHandler extends TestCase { AuthenticationToken token = handler.authenticate(request, response); - assertNotNull(token); - assertEquals("user", 
token.getUserName()); - assertEquals("user", token.getName()); - assertEquals(PseudoAuthenticationHandler.TYPE, token.getType()); + Assert.assertNotNull(token); + Assert.assertEquals("user", token.getUserName()); + Assert.assertEquals("user", token.getName()); + Assert.assertEquals(PseudoAuthenticationHandler.TYPE, token.getType()); } finally { handler.destroy(); } } + @Test public void testUserNameAnonymousOff() throws Exception { _testUserName(false); } + @Test public void testUserNameAnonymousOn() throws Exception { _testUserName(true); } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java index b6c0b0fb2ec..e82a0a6c182 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosName.java @@ -21,14 +21,19 @@ package org.apache.hadoop.security.authentication.util; import java.io.IOException; import org.apache.hadoop.security.authentication.KerberosTestUtils; +import org.junit.After; import org.junit.Before; import org.junit.Test; -import static org.junit.Assert.*; + +import org.junit.Assert; public class TestKerberosName { @Before public void setUp() throws Exception { + System.setProperty("java.security.krb5.realm", KerberosTestUtils.getRealm()); + System.setProperty("java.security.krb5.kdc", "localhost:88"); + String rules = "RULE:[1:$1@$0](.*@YAHOO\\.COM)s/@.*//\n" + "RULE:[2:$1](johndoe)s/^.*$/guest/\n" + @@ -44,7 +49,7 @@ public class TestKerberosName { KerberosName nm = new KerberosName(from); String simple = nm.getShortName(); System.out.println("to " + simple); - assertEquals("short name incorrect", to, simple); + Assert.assertEquals("short name incorrect", to, simple); } @Test @@ -61,7 +66,7 @@ public class TestKerberosName { System.out.println("Checking " + name + " to ensure it is bad."); try { new KerberosName(name); - fail("didn't get exception for " + name); + Assert.fail("didn't get exception for " + name); } catch (IllegalArgumentException iae) { // PASS } @@ -72,7 +77,7 @@ public class TestKerberosName { KerberosName nm = new KerberosName(from); try { nm.getShortName(); - fail("didn't get exception for " + from); + Assert.fail("didn't get exception for " + from); } catch (IOException ie) { // PASS } @@ -85,4 +90,10 @@ public class TestKerberosName { checkBadTranslation("foo@ACME.COM"); checkBadTranslation("root/joe@FOO.COM"); } + + @After + public void clear() { + System.clearProperty("java.security.krb5.realm"); + System.clearProperty("java.security.krb5.kdc"); + } } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java index 4c91e2bf13a..7da78aa20e0 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestKerberosUtil.java @@ -16,11 +16,10 @@ */ package org.apache.hadoop.security.authentication.util; -import static org.junit.Assert.*; +import org.junit.Assert; import java.io.IOException; -import 
org.apache.hadoop.security.authentication.util.KerberosUtil; import org.junit.Test; public class TestKerberosUtil { @@ -32,23 +31,23 @@ public class TestKerberosUtil { String testHost = "FooBar"; // send null hostname - assertEquals("When no hostname is sent", + Assert.assertEquals("When no hostname is sent", service + "/" + localHostname.toLowerCase(), KerberosUtil.getServicePrincipal(service, null)); // send empty hostname - assertEquals("When empty hostname is sent", + Assert.assertEquals("When empty hostname is sent", service + "/" + localHostname.toLowerCase(), KerberosUtil.getServicePrincipal(service, "")); // send 0.0.0.0 hostname - assertEquals("When 0.0.0.0 hostname is sent", + Assert.assertEquals("When 0.0.0.0 hostname is sent", service + "/" + localHostname.toLowerCase(), KerberosUtil.getServicePrincipal(service, "0.0.0.0")); // send uppercase hostname - assertEquals("When uppercase hostname is sent", + Assert.assertEquals("When uppercase hostname is sent", service + "/" + testHost.toLowerCase(), KerberosUtil.getServicePrincipal(service, testHost)); // send lowercase hostname - assertEquals("When lowercase hostname is sent", + Assert.assertEquals("When lowercase hostname is sent", service + "/" + testHost.toLowerCase(), KerberosUtil.getServicePrincipal(service, testHost.toLowerCase())); } diff --git a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java index 9b3d1a2a2a6..e7cd0e1a8ed 100644 --- a/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java +++ b/hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/util/TestSigner.java @@ -13,68 +13,75 @@ */ package org.apache.hadoop.security.authentication.util; -import junit.framework.TestCase; +import org.junit.Assert; +import org.junit.Test; -public class TestSigner extends TestCase { +public class TestSigner { + @Test public void testNoSecret() throws Exception { try { new Signer(null); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { } } + @Test public void testNullAndEmptyString() throws Exception { Signer signer = new Signer("secret".getBytes()); try { signer.sign(null); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } try { signer.sign(""); - fail(); + Assert.fail(); } catch (IllegalArgumentException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } } + @Test public void testSignature() throws Exception { Signer signer = new Signer("secret".getBytes()); String s1 = signer.sign("ok"); String s2 = signer.sign("ok"); String s3 = signer.sign("wrong"); - assertEquals(s1, s2); - assertNotSame(s1, s3); + Assert.assertEquals(s1, s2); + Assert.assertNotSame(s1, s3); } + @Test public void testVerify() throws Exception { Signer signer = new Signer("secret".getBytes()); String t = "test"; String s = signer.sign(t); String e = signer.verifyAndExtract(s); - assertEquals(t, e); + Assert.assertEquals(t, e); } + @Test public void testInvalidSignedText() throws Exception { Signer signer = new Signer("secret".getBytes()); try { signer.verifyAndExtract("test"); - fail(); + Assert.fail(); } catch (SignerException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } } + @Test public void testTampering() throws Exception { Signer signer = new 
Signer("secret".getBytes()); String t = "test"; @@ -82,12 +89,11 @@ public class TestSigner extends TestCase { s += "x"; try { signer.verifyAndExtract(s); - fail(); + Assert.fail(); } catch (SignerException ex) { // Expected } catch (Throwable ex) { - fail(); + Assert.fail(); } } - } diff --git a/hadoop-common-project/hadoop-auth/src/test/resources/krb5.conf b/hadoop-common-project/hadoop-auth/src/test/resources/krb5.conf deleted file mode 100644 index c9f956705fa..00000000000 --- a/hadoop-common-project/hadoop-auth/src/test/resources/krb5.conf +++ /dev/null @@ -1,28 +0,0 @@ -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -[libdefaults] - default_realm = ${kerberos.realm} - udp_preference_limit = 1 - extra_addresses = 127.0.0.1 -[realms] - ${kerberos.realm} = { - admin_server = localhost:88 - kdc = localhost:88 - } -[domain_realm] - localhost = ${kerberos.realm} diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index db23e0640df..c14c615fa35 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -319,6 +319,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9860. Remove class HackedKeytab and HackedKeytabEncoder from hadoop-minikdc once jira DIRSERVER-1882 solved. (ywskycn via tucu) + HADOOP-9866. convert hadoop-auth testcases requiring kerberos to + use minikdc. (ywskycn via tucu) + OPTIMIZATIONS HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn) From 172d0cef6909779f6f167fa62439f0553d2f69b1 Mon Sep 17 00:00:00 2001 From: Steve Loughran Date: Mon, 19 Aug 2013 23:49:27 +0000 Subject: [PATCH 036/153] HADOOP-9487 Deprecation warnings in Configuration should go to their own log or otherwise be suppressible git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515672 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/main/conf/log4j.properties | 10 +++++++++- .../java/org/apache/hadoop/conf/Configuration.java | 9 ++++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c14c615fa35..ca7be334c23 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -322,6 +322,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9866. convert hadoop-auth testcases requiring kerberos to use minikdc. (ywskycn via tucu) + HADOOP-9487 Deprecation warnings in Configuration should go to their + own log or otherwise be suppressible (Chu Tong via stevel) + OPTIMIZATIONS HADOOP-9748. 
Reduce blocking on UGI.ensureInitialized (daryn) diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index aef773a5f79..d436db9df3b 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -130,6 +130,13 @@ log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd +# +# hadoop configuration logging +# + +# Uncomment the following line to turn off configuration deprecation warnings. +# log4j.logger.org.apache.hadoop.conf.Configuration.deprecation=WARN + # # hdfs audit logging # @@ -231,4 +238,5 @@ log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n #log4j.appender.HSAUDIT.File=${hadoop.log.dir}/hs-audit.log #log4j.appender.HSAUDIT.layout=org.apache.log4j.PatternLayout #log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n -#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd \ No newline at end of file +#log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd + diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 9bc7472da8a..71d5ce4320a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -153,6 +153,10 @@ import com.google.common.base.Preconditions; * will be resolved to another property in this Configuration, while * ${user.name} would then ordinarily be resolved to the value * of the System property with that name. + * By default, warnings will be given to any deprecated configuration + * parameters and these are suppressible by configuring + * log4j.logger.org.apache.hadoop.conf.Configuration.deprecation in + * log4j.properties file. */ @InterfaceAudience.Public @InterfaceStability.Stable @@ -161,6 +165,9 @@ public class Configuration implements Iterable>, private static final Log LOG = LogFactory.getLog(Configuration.class); + private static final Log LOG_DEPRECATION = + LogFactory.getLog("org.apache.hadoop.conf.Configuration.deprecation"); + private boolean quietmode = true; private static class Resource { @@ -836,7 +843,7 @@ public class Configuration implements Iterable>, private void warnOnceIfDeprecated(String name) { DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name); if (keyInfo != null && !keyInfo.accessed) { - LOG.warn(keyInfo.getWarningMessage(name)); + LOG_DEPRECATION.info(keyInfo.getWarningMessage(name)); } } From f6a1f4d1e0c60bceccc82d546b85abb25f1bcd88 Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Tue, 20 Aug 2013 06:24:22 +0000 Subject: [PATCH 037/153] HADOOP-9879. 
Move the version info of zookeeper dependencies to hadoop-project/pom (Karthik Kambatla via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515711 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ hadoop-common-project/hadoop-common/pom.xml | 2 -- hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 1 - hadoop-hdfs-project/hadoop-hdfs/pom.xml | 1 - hadoop-project/pom.xml | 7 +++++++ 5 files changed, 10 insertions(+), 4 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index ca7be334c23..e32580fe7a3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -378,6 +378,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9802. Support Snappy codec on Windows. (cnauroth) + HADOOP-9879. Move the version info of zookeeper dependencies to + hadoop-project/pom (Karthik Kambatla via Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index a7462ea5a3b..af5a7f4ee95 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -217,7 +217,6 @@ org.apache.zookeeper zookeeper - 3.4.2 jline @@ -245,7 +244,6 @@ org.apache.zookeeper zookeeper - 3.4.2 test-jar test diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index cb1906d9c83..945bb658385 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -77,7 +77,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.zookeeper zookeeper - 3.4.2 test-jar test diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index f0e3ac882b5..59abffa0f96 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -63,7 +63,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.zookeeper zookeeper - 3.4.2 test-jar test diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index e1bb833c630..8dee23a467e 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -663,6 +663,13 @@ + + org.apache.zookeeper + zookeeper + 3.4.2 + test-jar + test + org.apache.bookkeeper bookkeeper-server From f7ca7ec4c9eb3ec65f0d365b41ce1b5e8923eb8a Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Tue, 20 Aug 2013 16:41:16 +0000 Subject: [PATCH 038/153] MAPREDUCE-5001. LocalJobRunner has race condition resulting in job failures. Contributed by Sandy Ryza git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515863 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 6 ++++++ .../java/org/apache/hadoop/mapreduce/Cluster.java | 14 +++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 7125f28e65c..00848a2ddc2 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -223,6 +223,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5454. TestDFSIO fails intermittently on JDK7 (Karthik Kambatla via Sandy Ryza) + MAPREDUCE-5001. LocalJobRunner has race condition resulting in job + failures (Sandy Ryza via jlowe) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES @@ -1298,6 +1301,9 @@ Release 0.23.10 - UNRELEASED MAPREDUCE-5440. 
TestCopyCommitter Fails on JDK7 (Robert Parker via jlowe) + MAPREDUCE-5001. LocalJobRunner has race condition resulting in job + failures (Sandy Ryza via jlowe) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java index e93f2736044..2fcc0461c39 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java @@ -18,6 +18,7 @@ package org.apache.hadoop.mapreduce; +import java.io.FileNotFoundException; import java.io.IOException; import java.net.InetSocketAddress; import java.security.PrivilegedExceptionAction; @@ -181,7 +182,18 @@ public class Cluster { public Job getJob(JobID jobId) throws IOException, InterruptedException { JobStatus status = client.getJobStatus(jobId); if (status != null) { - return Job.getInstance(this, status, new JobConf(status.getJobFile())); + JobConf conf; + try { + conf = new JobConf(status.getJobFile()); + } catch (RuntimeException ex) { + // If job file doesn't exist it means we can't find the job + if (ex.getCause() instanceof FileNotFoundException) { + return null; + } else { + throw ex; + } + } + return Job.getInstance(this, status, conf); } return null; } From 3015429368e139bf54a697b93b692caa3629164b Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Tue, 20 Aug 2013 17:18:53 +0000 Subject: [PATCH 039/153] HDFS-2933. Improve DataNode Web UI Index Page. 
(Vivek Ganesan via Arpit Agarwal) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515890 13f79535-47bb-0310-9956-ffa450edef68 --- .../server/datanode/DatanodeJspHelper.java | 28 +++++++++ .../main/webapps/datanode/dataNodeHome.jsp | 58 +++++++++++++++++++ .../src/main/webapps/datanode/index.html | 35 +++++++++++ .../src/main/webapps/hdfs/index.html | 4 +- 4 files changed, 123 insertions(+), 2 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java index 80732f0d304..639468bbc75 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java @@ -19,7 +19,9 @@ package org.apache.hadoop.hdfs.server.datanode; import java.io.File; import java.io.IOException; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.URI; import java.net.URL; import java.net.URLEncoder; import java.security.PrivilegedExceptionAction; @@ -27,6 +29,7 @@ import java.text.SimpleDateFormat; import java.util.Date; import java.util.List; +import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.servlet.jsp.JspWriter; @@ -36,6 +39,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; +import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -43,6 +47,9 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager; import org.apache.hadoop.hdfs.server.common.JspHelper; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer; import org.apache.hadoop.http.HtmlQuoting; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.net.NetUtils; @@ -50,6 +57,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.ServletUtil; import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.VersionInfo; @InterfaceAudience.Private public class DatanodeJspHelper { @@ -712,4 +720,24 @@ public class DatanodeJspHelper { final String nnAddr = request.getParameter(JspHelper.NAMENODE_ADDRESS); return getDFSClient(ugi, nnAddr, conf); } + + /** Return a table containing version information. */ + public static String getVersionTable(ServletContext context) { + StringBuilder sb = new StringBuilder(); + final DataNode dataNode = (DataNode) context.getAttribute("datanode"); + sb.append("
"); + sb.append("\n" + "\n \n \n
Version:"); + sb.append(VersionInfo.getVersion() + ", " + VersionInfo.getRevision()); + sb.append("
Compiled:" + + VersionInfo.getDate()); + sb.append(" by " + VersionInfo.getUser() + " from " + + VersionInfo.getBranch()); + if (dataNode != null) { + sb.append("
Cluster ID:" + + dataNode.getClusterId()); + } + sb.append("
"); + return sb.toString(); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp new file mode 100644 index 00000000000..56a4f50dd1b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/dataNodeHome.jsp @@ -0,0 +1,58 @@ +<% +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +%> +<%@page import="org.apache.hadoop.hdfs.tools.GetConf"%> +<%@page import="org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper"%> +<%@page import="org.apache.hadoop.hdfs.server.datanode.DataNode"%> +<%@ page + contentType="text/html; charset=UTF-8" + import="org.apache.hadoop.util.ServletUtil" +%> +<%! + //for java.io.Serializable + private static final long serialVersionUID = 1L; +%> +<% + DataNode dataNode = (DataNode)getServletContext().getAttribute("datanode"); + String state = dataNode.isDatanodeUp()?"active":"inactive"; + String dataNodeLabel = dataNode.getDisplayName(); +%> + + + + + +Hadoop DataNode <%=dataNodeLabel%> + + +

DataNode '<%=dataNodeLabel%>' (<%=state%>)

+<%= DatanodeJspHelper.getVersionTable(getServletContext()) %> +
+DataNode Logs +
+View/Set Log Level +
+Metrics +
+Configuration +
+Block Scanner Report +<% +out.println(ServletUtil.htmlFooter()); +%> diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html new file mode 100644 index 00000000000..eaaa2228e3d --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/index.html @@ -0,0 +1,35 @@ + + + + +Hadoop Administration + + + + +

Hadoop Administration

+ + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html index 648da4ae2a8..7fc136b0c85 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/index.html @@ -1,5 +1,3 @@ - - + + Hadoop Administration From 00afcdfd3904e704c1b277e912d2eea26ef27b98 Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Tue, 20 Aug 2013 17:26:24 +0000 Subject: [PATCH 040/153] HDFS-2933. Update CHANGES.txt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515892 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 1 file changed, 3 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 7d8dc365f86..15e7a1b367e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -316,6 +316,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5047. Supress logging of full stack trace of quota and lease exceptions. (Robert Parker via kihwal) + HDFS-2933. Improve DataNode Web UI Index Page. (Vivek Ganesan via + Arpit Agarwal) + OPTIMIZATIONS BUG FIXES From 9718fd4c7245851bc678ee05e66ba6a98138a9cc Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Tue, 20 Aug 2013 17:33:07 +0000 Subject: [PATCH 041/153] HDFS-5111. Remove duplicated error message for snapshot commands when processing invalid arguments. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515895 13f79535-47bb-0310-9956-ffa450edef68 --- .../java/org/apache/hadoop/fs/shell/SnapshotCommands.java | 6 +++--- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java index ed687c14a61..570e442c282 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java @@ -79,7 +79,7 @@ class SnapshotCommands extends FsCommand { protected void processArguments(LinkedList items) throws IOException { super.processArguments(items); - if (exitCode != 0) { // check for error collecting paths + if (numErrors != 0) { // check for error collecting paths return; } assert(items.size() == 1); @@ -119,7 +119,7 @@ class SnapshotCommands extends FsCommand { protected void processArguments(LinkedList items) throws IOException { super.processArguments(items); - if (exitCode != 0) { // check for error collecting paths + if (numErrors != 0) { // check for error collecting paths return; } assert (items.size() == 1); @@ -160,7 +160,7 @@ class SnapshotCommands extends FsCommand { protected void processArguments(LinkedList items) throws IOException { super.processArguments(items); - if (exitCode != 0) { // check for error collecting paths + if (numErrors != 0) { // check for error collecting paths return; } Preconditions.checkArgument(items.size() == 1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 15e7a1b367e..b2b285ecb05 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -319,6 +319,9 @@ Release 
2.1.1-beta - UNRELEASED HDFS-2933. Improve DataNode Web UI Index Page. (Vivek Ganesan via Arpit Agarwal) + HDFS-5111. Remove duplicated error message for snapshot commands when + processing invalid arguments. (jing9) + OPTIMIZATIONS BUG FIXES From b7a6c5ebb48275b3512bdbf201e0e8873b6d77b6 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Tue, 20 Aug 2013 20:19:07 +0000 Subject: [PATCH 042/153] HADOOP-9877. Fix listing of snapshot directories in globStatus. (Binglin Chang via Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515955 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../java/org/apache/hadoop/fs/Globber.java | 56 ++++++++++++++++--- .../fs/FileContextMainOperationsBaseTest.java | 15 +++++ .../hadoop/fs/TestFsShellReturnCode.java | 20 +++++++ .../fs/TestHDFSFileContextMainOperations.java | 11 ++++ 5 files changed, 96 insertions(+), 8 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e32580fe7a3..02ff55b5ab3 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -349,6 +349,8 @@ Release 2.3.0 - UNRELEASED HADOOP-9865. FileContext#globStatus has a regression with respect to relative path. (Chuan Lin via Colin Patrick McCabe) + HADOOP-9877. Fix listing of snapshot directories in globStatus. + (Binglin Chang via Andrew Wang) Release 2.1.1-beta - UNRELEASED diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java index 378311a71a2..57ad45e81d4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java @@ -62,6 +62,18 @@ class Globber { } } + private FileStatus getFileLinkStatus(Path path) { + try { + if (fs != null) { + return fs.getFileLinkStatus(path); + } else { + return fc.getFileLinkStatus(path); + } + } catch (IOException e) { + return null; + } + } + private FileStatus[] listStatus(Path path) { try { if (fs != null) { @@ -122,6 +134,18 @@ class Globber { return authority ; } + /** + * The glob filter builds a regexp per path component. If the component + * does not contain a shell metachar, then it falls back to appending the + * raw string to the list of built up paths. This raw path needs to have + * the quoting removed. Ie. convert all occurrences of "\X" to "X" + * @param name of the path component + * @return the unquoted path component + */ + private static String unquotePathComponent(String name) { + return name.replaceAll("\\\\(.)", "$1"); + } + public FileStatus[] glob() throws IOException { // First we get the scheme and authority of the pattern that was passed // in. @@ -176,14 +200,30 @@ class Globber { resolvedCandidate.isDirectory() == false) { continue; } - FileStatus[] children = listStatus(candidate.getPath()); - for (FileStatus child : children) { - // Set the child path based on the parent path. - // This keeps the symlinks in our path. - child.setPath(new Path(candidate.getPath(), - child.getPath().getName())); - if (globFilter.accept(child.getPath())) { - newCandidates.add(child); + // For components without pattern, we get its FileStatus directly + // using getFileLinkStatus for two reasons: + // 1. It should be faster to only get FileStatus needed rather than + // get all children. + // 2. 
Some special filesystem directories (e.g. HDFS snapshot + // directories) are not returned by listStatus, but do exist if + // checked explicitly via getFileLinkStatus. + if (globFilter.hasPattern()) { + FileStatus[] children = listStatus(candidate.getPath()); + for (FileStatus child : children) { + // Set the child path based on the parent path. + // This keeps the symlinks in our path. + child.setPath(new Path(candidate.getPath(), + child.getPath().getName())); + if (globFilter.accept(child.getPath())) { + newCandidates.add(child); + } + } + } else { + Path p = new Path(candidate.getPath(), unquotePathComponent(component)); + FileStatus s = getFileLinkStatus(p); + if (s != null) { + s.setPath(p); + newCandidates.add(s); } } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java index 354f7aabfd6..877a491bf9b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java @@ -30,6 +30,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.FsPermission; import org.junit.After; import org.junit.Assert; +import org.junit.Assume; import org.junit.Before; import org.junit.Test; @@ -632,6 +633,20 @@ public abstract class FileContextMainOperationsBaseTest { filteredPaths)); } + protected Path getHiddenPathForTest() { + return null; + } + + @Test + public void testGlobStatusFilterWithHiddenPathTrivialFilter() + throws Exception { + Path hidden = getHiddenPathForTest(); + Assume.assumeNotNull(hidden); + FileStatus[] filteredPaths = fc.util().globStatus(hidden, DEFAULT_FILTER); + Assert.assertNotNull(filteredPaths); + Assert.assertEquals(1, filteredPaths.length); + } + @Test public void testWriteReadAndDeleteEmptyFile() throws Exception { writeReadAndDelete(0); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java index dcc19df3d4e..2fff29e38d4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java @@ -517,6 +517,26 @@ public class TestFsShellReturnCode { } return stat; } + + @Override + public FileStatus getFileLinkStatus(Path p) throws IOException { + String f = makeQualified(p).toString(); + FileStatus stat = super.getFileLinkStatus(p); + + stat.getPermission(); + if (owners.containsKey(f)) { + stat.setOwner("STUB-"+owners.get(f)); + } else { + stat.setOwner("REAL-"+stat.getOwner()); + } + if (groups.containsKey(f)) { + stat.setGroup("STUB-"+groups.get(f)); + } else { + stat.setGroup("REAL-"+stat.getGroup()); + } + return stat; + } + } static class MyFsShell extends FsShell { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java index 6388bdd9e7a..8f5f14db614 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java @@ -59,6 +59,9 @@ public class TestHDFSFileContextMainOperations extends defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); + // Make defaultWorkingDirectory snapshottable to enable + // testGlobStatusFilterWithHiddenPathTrivialFilter + cluster.getFileSystem().allowSnapshot(defaultWorkingDirectory); } private static void restartCluster() throws IOException, LoginException { @@ -73,6 +76,9 @@ public class TestHDFSFileContextMainOperations extends defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); + // Make defaultWorkingDirectory snapshottable to enable + // testGlobStatusFilterWithHiddenPathTrivialFilter + cluster.getFileSystem().allowSnapshot(defaultWorkingDirectory); } @AfterClass @@ -92,6 +98,11 @@ public class TestHDFSFileContextMainOperations extends super.tearDown(); } + @Override + protected Path getHiddenPathForTest() { + return new Path(defaultWorkingDirectory, ".snapshot"); + } + @Override protected Path getDefaultWorkingDirectory() { return defaultWorkingDirectory; From 2cd6064195da817d2c34b64f19d4c6d630efbc4a Mon Sep 17 00:00:00 2001 From: Jonathan Turner Eagles Date: Tue, 20 Aug 2013 21:53:38 +0000 Subject: [PATCH 043/153] HADOOP-9686. Easy access to final parameters in Configuration (Jason Lowe via jeagles) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515984 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/conf/Configuration.java | 9 +++++++++ .../apache/hadoop/conf/TestConfiguration.java | 18 +++++++++++++++++- 3 files changed, 29 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 02ff55b5ab3..5ccfc1d56ef 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -2074,6 +2074,9 @@ Release 0.23.10 - UNRELEASED IMPROVEMENTS + HADOOP-9686. Easy access to final parameters in Configuration (Jason Lowe + via jeagles) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java index 71d5ce4320a..8a4cc00f56a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java @@ -1918,6 +1918,15 @@ public class Configuration implements Iterable>, } } + /** + * Get the set of parameters marked final. + * + * @return final parameter set. 
+ */ + public Set getFinalParameters() { + return new HashSet(finalParameters); + } + protected synchronized Properties getProps() { if (properties == null) { properties = new Properties(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java index 3bb211c54eb..87ebb61f49e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/conf/TestConfiguration.java @@ -1272,7 +1272,23 @@ public class TestConfiguration extends TestCase { Class clazz = config.getClassByNameOrNull("java.lang.Object"); assertNotNull(clazz); } - + + public void testGetFinalParameters() throws Exception { + out=new BufferedWriter(new FileWriter(CONFIG)); + startConfig(); + declareProperty("my.var", "x", "x", true); + endConfig(); + Path fileResource = new Path(CONFIG); + Configuration conf = new Configuration(); + Set finalParameters = conf.getFinalParameters(); + assertFalse("my.var already exists", finalParameters.contains("my.var")); + conf.addResource(fileResource); + assertEquals("my.var is undefined", "x", conf.get("my.var")); + assertFalse("finalparams not copied", finalParameters.contains("my.var")); + finalParameters = conf.getFinalParameters(); + assertTrue("my.var is not final", finalParameters.contains("my.var")); + } + public static void main(String[] argv) throws Exception { junit.textui.TestRunner.main(new String[]{ TestConfiguration.class.getName() From 5e68bc4cd62337e91e4eafe4d8c3c4692c0e30ea Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Tue, 20 Aug 2013 22:15:22 +0000 Subject: [PATCH 044/153] HDFS-4594. WebHDFS open sets Content-Length header to what is specified by length parameter rather than how much data is actually returned. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1515989 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../web/resources/DatanodeWebHdfsMethods.java | 5 +- .../web/TestWebHdfsFileSystemContract.java | 102 ++++++++++++++++++ 3 files changed, 108 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index b2b285ecb05..6a6e094a526 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -372,6 +372,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5106. TestDatanodeBlockScanner fails on Windows due to incorrect path format. (Chuan Liu via cnauroth) + HDFS-4594. WebHDFS open sets Content-Length header to what is specified by + length parameter rather than how much data is actually returned. 
(cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java index 262b66f9bcf..973d0916b90 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java @@ -410,8 +410,9 @@ public class DatanodeWebHdfsMethods { throw ioe; } - final long n = length.getValue() != null? length.getValue() - : in.getVisibleLength() - offset.getValue(); + final long n = length.getValue() != null ? + Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) : + in.getVisibleLength() - offset.getValue(); return Response.ok(new OpenEntity(in, n, dfsclient)).type( MediaType.APPLICATION_OCTET_STREAM).build(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java index 343aa775d0b..4181ce60376 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java @@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.web; import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; +import java.io.InputStream; import java.io.InputStreamReader; import java.net.HttpURLConnection; import java.net.URL; @@ -45,8 +46,11 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.web.resources.DoAsParam; import org.apache.hadoop.hdfs.web.resources.GetOpParam; import org.apache.hadoop.hdfs.web.resources.HttpOpParam; +import org.apache.hadoop.hdfs.web.resources.LengthParam; import org.apache.hadoop.hdfs.web.resources.NamenodeRpcAddressParam; +import org.apache.hadoop.hdfs.web.resources.OffsetParam; import org.apache.hadoop.hdfs.web.resources.PutOpParam; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.UserGroupInformation; import org.junit.Assert; @@ -288,6 +292,104 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest { } } + /** + * Test get with length parameter greater than actual file length. + */ + public void testLengthParamLongerThanFile() throws IOException { + WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs; + Path dir = new Path("/test"); + assertTrue(webhdfs.mkdirs(dir)); + + // Create a file with some content. + Path testFile = new Path("/test/testLengthParamLongerThanFile"); + String content = "testLengthParamLongerThanFile"; + FSDataOutputStream testFileOut = webhdfs.create(testFile); + try { + testFileOut.write(content.getBytes("US-ASCII")); + } finally { + IOUtils.closeStream(testFileOut); + } + + // Open the file, but request length longer than actual file length by 1. 
+ HttpOpParam.Op op = GetOpParam.Op.OPEN; + URL url = webhdfs.toUrl(op, testFile, new LengthParam(Long.valueOf( + content.length() + 1))); + HttpURLConnection conn = null; + InputStream is = null; + try { + conn = (HttpURLConnection)url.openConnection(); + conn.setRequestMethod(op.getType().toString()); + conn.setDoOutput(op.getDoOutput()); + conn.setInstanceFollowRedirects(true); + + // Expect OK response and Content-Length header equal to actual length. + assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode()); + assertEquals(String.valueOf(content.length()), conn.getHeaderField( + "Content-Length")); + + // Check content matches. + byte[] respBody = new byte[content.length()]; + is = conn.getInputStream(); + IOUtils.readFully(is, respBody, 0, content.length()); + assertEquals(content, new String(respBody, "US-ASCII")); + } finally { + IOUtils.closeStream(is); + if (conn != null) { + conn.disconnect(); + } + } + } + + /** + * Test get with offset and length parameters that combine to request a length + * greater than actual file length. + */ + public void testOffsetPlusLengthParamsLongerThanFile() throws IOException { + WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs; + Path dir = new Path("/test"); + assertTrue(webhdfs.mkdirs(dir)); + + // Create a file with some content. + Path testFile = new Path("/test/testOffsetPlusLengthParamsLongerThanFile"); + String content = "testOffsetPlusLengthParamsLongerThanFile"; + FSDataOutputStream testFileOut = webhdfs.create(testFile); + try { + testFileOut.write(content.getBytes("US-ASCII")); + } finally { + IOUtils.closeStream(testFileOut); + } + + // Open the file, but request offset starting at 1 and length equal to file + // length. Considering the offset, this is longer than the actual content. + HttpOpParam.Op op = GetOpParam.Op.OPEN; + URL url = webhdfs.toUrl(op, testFile, new LengthParam(Long.valueOf( + content.length())), new OffsetParam(1L)); + HttpURLConnection conn = null; + InputStream is = null; + try { + conn = (HttpURLConnection)url.openConnection(); + conn.setRequestMethod(op.getType().toString()); + conn.setDoOutput(op.getDoOutput()); + conn.setInstanceFollowRedirects(true); + + // Expect OK response and Content-Length header equal to actual length. + assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode()); + assertEquals(String.valueOf(content.length() - 1), conn.getHeaderField( + "Content-Length")); + + // Check content matches. + byte[] respBody = new byte[content.length() - 1]; + is = conn.getInputStream(); + IOUtils.readFully(is, respBody, 0, content.length() - 1); + assertEquals(content.substring(1), new String(respBody, "US-ASCII")); + } finally { + IOUtils.closeStream(is); + if (conn != null) { + conn.disconnect(); + } + } + } + public void testResponseCode() throws IOException { final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs; final Path root = new Path("/"); From a6ef93307eda6ff5c8bed5cfd72bb06b037644ce Mon Sep 17 00:00:00 2001 From: Arpit Gupta Date: Tue, 20 Aug 2013 23:44:47 +0000 Subject: [PATCH 045/153] HADOOP-9886. Turn warning message in RetryInvocationHandler to debug. 
Contributed by Arpit Gupta git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516034 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/io/retry/RetryInvocationHandler.java | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 5ccfc1d56ef..3d2f59b8cee 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -382,6 +382,8 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9879. Move the version info of zookeeper dependencies to hadoop-project/pom (Karthik Kambatla via Sandy Ryza) + + HADOOP-9886. Turn warning message in RetryInvocationHandler to debug (arpit) OPTIMIZATIONS diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java index 4bf000ee85c..974bac91eb4 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java @@ -136,8 +136,6 @@ public class RetryInvocationHandler implements RpcInvocationHandler { msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis); if (LOG.isDebugEnabled()) { LOG.debug(msg, e); - } else { - LOG.warn(msg); } } else { if(LOG.isDebugEnabled()) { From 782191f1ba27e0ff0acf3c6cf8a88df00274d308 Mon Sep 17 00:00:00 2001 From: Luke Lu Date: Wed, 21 Aug 2013 10:12:13 +0000 Subject: [PATCH 046/153] HADOOP-9784. Add a builder for HttpServer. (Junping Du via llu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516128 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../org/apache/hadoop/http/HttpServer.java | 114 +++++++++++++++++- .../hadoop/http/HttpServerFunctionalTest.java | 16 ++- .../apache/hadoop/http/TestHttpServer.java | 9 +- .../org/apache/hadoop/log/TestLogLevel.java | 5 +- .../server/JournalNodeHttpServer.java | 21 ++-- .../hadoop/hdfs/server/datanode/DataNode.java | 14 ++- .../server/namenode/NameNodeHttpServer.java | 112 +++++++++-------- .../server/namenode/SecondaryNameNode.java | 22 ++-- .../namenode/TestEditLogFileInputStream.java | 3 +- .../hadoop/mapred/TestJobEndNotifier.java | 3 +- .../yarn/server/webproxy/WebAppProxy.java | 5 +- 12 files changed, 223 insertions(+), 103 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3d2f59b8cee..a2d1304ead7 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -291,6 +291,8 @@ Release 2.3.0 - UNRELEASED IMPROVEMENTS + HADOOP-9784. Add a builder for HttpServer. (Junping Du via llu) + HADOOP 9871. Fix intermittent findbugs warnings in DefaultMetricsSystem. 
(Junping Du via llu) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index b3d6d4ac68a..50582065473 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -47,6 +47,7 @@ import javax.servlet.http.HttpServletResponse; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.ConfServlet; @@ -119,18 +120,117 @@ public class HttpServer implements FilterContainer { protected final Map defaultContexts = new HashMap(); protected final List filterNames = new ArrayList(); - private static final int MAX_RETRIES = 10; static final String STATE_DESCRIPTION_ALIVE = " - alive"; static final String STATE_DESCRIPTION_NOT_LIVE = " - not live"; private final boolean listenerStartedExternally; + /** + * Class to construct instances of HTTP server with specific options. + */ + public static class Builder { + String name; + String bindAddress; + Integer port; + Boolean findPort; + Configuration conf; + Connector connector; + String[] pathSpecs; + AccessControlList adminsAcl; + boolean securityEnabled = false; + String usernameConfKey = null; + String keytabConfKey = null; + + public Builder setName(String name){ + this.name = name; + return this; + } + + public Builder setBindAddress(String bindAddress){ + this.bindAddress = bindAddress; + return this; + } + + public Builder setPort(int port) { + this.port = port; + return this; + } + + public Builder setFindPort(boolean findPort) { + this.findPort = findPort; + return this; + } + + public Builder setConf(Configuration conf) { + this.conf = conf; + return this; + } + + public Builder setConnector(Connector connector) { + this.connector = connector; + return this; + } + + public Builder setPathSpec(String[] pathSpec) { + this.pathSpecs = pathSpec; + return this; + } + + public Builder setACL(AccessControlList acl) { + this.adminsAcl = acl; + return this; + } + + public Builder setSecurityEnabled(boolean securityEnabled) { + this.securityEnabled = securityEnabled; + return this; + } + + public Builder setUsernameConfKey(String usernameConfKey) { + this.usernameConfKey = usernameConfKey; + return this; + } + + public Builder setKeytabConfKey(String keytabConfKey) { + this.keytabConfKey = keytabConfKey; + return this; + } + + public HttpServer build() throws IOException { + if (this.name == null) { + throw new HadoopIllegalArgumentException("name is not set"); + } + if (this.bindAddress == null) { + throw new HadoopIllegalArgumentException("bindAddress is not set"); + } + if (this.port == null) { + throw new HadoopIllegalArgumentException("port is not set"); + } + if (this.findPort == null) { + throw new HadoopIllegalArgumentException("findPort is not set"); + } + + if (this.conf == null) { + conf = new Configuration(); + } + + HttpServer server = new HttpServer(this.name, this.bindAddress, this.port, + this.findPort, this.conf, this.adminsAcl, this.connector, this.pathSpecs); + if (this.securityEnabled) { + server.initSpnego(this.conf, this.usernameConfKey, this.keytabConfKey); + } + return server; + } + } + /** Same as this(name, bindAddress, 
port, findPort, null); */ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort ) throws IOException { this(name, bindAddress, port, findPort, new Configuration()); } - + + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, Connector connector) throws IOException { this(name, bindAddress, port, findPort, conf, null, connector, null); @@ -150,6 +250,7 @@ public class HttpServer implements FilterContainer { * @param pathSpecs Path specifications that this httpserver will be serving. * These will be added to any filters. */ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, String[] pathSpecs) throws IOException { this(name, bindAddress, port, findPort, conf, null, null, pathSpecs); @@ -164,11 +265,13 @@ public class HttpServer implements FilterContainer { * increment by 1 until it finds a free port. * @param conf Configuration */ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf) throws IOException { this(name, bindAddress, port, findPort, conf, null, null, null); } + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl) throws IOException { @@ -186,6 +289,7 @@ public class HttpServer implements FilterContainer { * @param conf Configuration * @param adminsAcl {@link AccessControlList} of the admins */ + @Deprecated public HttpServer(String name, String bindAddress, int port, boolean findPort, Configuration conf, AccessControlList adminsAcl, Connector connector) throws IOException { @@ -529,7 +633,7 @@ public class HttpServer implements FilterContainer { /** * Define a filter for a context and set up default url mappings. */ - protected void defineFilter(Context ctx, String name, + public void defineFilter(Context ctx, String name, String classname, Map parameters, String[] urls) { FilterHolder holder = new FilterHolder(); @@ -569,6 +673,10 @@ public class HttpServer implements FilterContainer { public Object getAttribute(String name) { return webAppContext.getAttribute(name); } + + public WebAppContext getWebAppContext(){ + return this.webAppContext; + } /** * Get the pathname to the webapps files. 
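The hunks that follow switch existing callers from the deprecated HttpServer constructors to this builder. As a minimal sketch of the new call pattern, assuming only that a Configuration named conf is in scope (the class name BuilderSketch and the "test" webapp name are illustrative, not part of the patch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;

public class BuilderSketch {
  // Builds and starts an HTTP server, mirroring the conversions in the hunks
  // below; port 0 together with setFindPort(true) lets the server pick a free port.
  public static HttpServer startTestServer(Configuration conf) throws IOException {
    HttpServer server = new HttpServer.Builder()
        .setName("test")
        .setBindAddress("0.0.0.0")
        .setPort(0)
        .setFindPort(true)
        .setConf(conf)
        .build();
    server.start();
    return server;
  }
}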
diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java index 6dee7eb7134..52d569d6e6d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java @@ -116,7 +116,8 @@ public class HttpServerFunctionalTest extends Assert { public static HttpServer createServer(String host, int port) throws IOException { prepareTestWebapp(); - return new HttpServer(TEST, host, port, true); + return new HttpServer.Builder().setName(TEST).setBindAddress(host) + .setPort(port).setFindPort(true).build(); } /** @@ -126,7 +127,8 @@ public class HttpServerFunctionalTest extends Assert { * @throws IOException if it could not be created */ public static HttpServer createServer(String webapp) throws IOException { - return new HttpServer(webapp, "0.0.0.0", 0, true); + return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") + .setPort(0).setFindPort(true).build(); } /** * Create an HttpServer instance for the given webapp @@ -137,13 +139,16 @@ public class HttpServerFunctionalTest extends Assert { */ public static HttpServer createServer(String webapp, Configuration conf) throws IOException { - return new HttpServer(webapp, "0.0.0.0", 0, true, conf); + return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") + .setPort(0).setFindPort(true).setConf(conf).build(); } public static HttpServer createServer(String webapp, Configuration conf, AccessControlList adminsAcl) throws IOException { - return new HttpServer(webapp, "0.0.0.0", 0, true, conf, adminsAcl); + return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") + .setPort(0).setFindPort(true).setConf(conf).setACL(adminsAcl).build(); } + /** * Create an HttpServer instance for the given webapp * @param webapp the webapp to work with @@ -154,7 +159,8 @@ public class HttpServerFunctionalTest extends Assert { */ public static HttpServer createServer(String webapp, Configuration conf, String[] pathSpecs) throws IOException { - return new HttpServer(webapp, "0.0.0.0", 0, true, conf, pathSpecs); + return new HttpServer.Builder().setName(webapp).setBindAddress("0.0.0.0") + .setPort(0).setFindPort(true).setConf(conf).setPathSpec(pathSpecs).build(); } /** diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index 079bc370209..9dfaf3ec2a4 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -121,7 +121,6 @@ public class TestHttpServer extends HttpServerFunctionalTest { @SuppressWarnings("serial") public static class LongHeaderServlet extends HttpServlet { - @SuppressWarnings("unchecked") @Override public void doGet(HttpServletRequest request, HttpServletResponse response @@ -362,7 +361,8 @@ public class TestHttpServer extends HttpServerFunctionalTest { MyGroupsProvider.mapping.put("userA", Arrays.asList("groupA")); MyGroupsProvider.mapping.put("userB", Arrays.asList("groupB")); - HttpServer myServer = new HttpServer("test", "0.0.0.0", 0, true, conf); + HttpServer myServer = new 
HttpServer.Builder().setName("test") + .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); int port = myServer.getPort(); @@ -403,8 +403,9 @@ public class TestHttpServer extends HttpServerFunctionalTest { MyGroupsProvider.mapping.put("userD", Arrays.asList("groupD")); MyGroupsProvider.mapping.put("userE", Arrays.asList("groupE")); - HttpServer myServer = new HttpServer("test", "0.0.0.0", 0, true, conf, - new AccessControlList("userA,userB groupC,groupD")); + HttpServer myServer = new HttpServer.Builder().setName("test") + .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).setConf(conf) + .setACL(new AccessControlList("userA,userB groupC,groupD")).build(); myServer.setAttribute(HttpServer.CONF_CONTEXT_ATTRIBUTE, conf); myServer.start(); int port = myServer.getPort(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java index f2443c04d90..c5a0d0bc04c 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/log/TestLogLevel.java @@ -42,7 +42,10 @@ public class TestLogLevel extends TestCase { log.error("log.error1"); assertTrue(!Level.ERROR.equals(log.getEffectiveLevel())); - HttpServer server = new HttpServer("..", "localhost", 22222, true); + HttpServer server = new HttpServer.Builder().setName("..") + .setBindAddress("localhost").setPort(22222).setFindPort(true) + .build(); + server.start(); int port = server.getPort(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java index d5758a2698a..6c26dd75fc0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java @@ -36,8 +36,8 @@ import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.authorize.AccessControlList; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; /** * Encapsulates the HTTP server started by the Journal Service. 
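The hunk below replaces the JournalNode's anonymous HttpServer subclass, which called initSpnego from an instance initializer, with the builder's security options. Pulled out of diff context, the secure construction pattern looks roughly like this sketch; the ACL string and the two configuration key names are placeholders for the HDFS-specific values the real code passes:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;

public class SecureBuilderSketch {
  public static HttpServer build(Configuration conf, String host, int port)
      throws IOException {
    return new HttpServer.Builder()
        .setName("journal")
        .setBindAddress(host)
        .setPort(port)
        .setFindPort(port == 0)
        .setConf(conf)
        // Placeholder ACL; the real callers read the admin list from conf.
        .setACL(new AccessControlList("admin adminGroup"))
        // SPNEGO is only initialized by build() when security is enabled,
        // matching the old "if (UserGroupInformation.isSecurityEnabled())" check.
        .setSecurityEnabled(UserGroupInformation.isSecurityEnabled())
        // Placeholder key names for the principal and keytab settings.
        .setUsernameConfKey("example.web.authentication.kerberos.principal")
        .setKeytabConfKey("example.web.keytab.file")
        .build();
  }
}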
@@ -69,16 +69,15 @@ public class JournalNodeHttpServer { bindAddr.getHostName())); int tmpInfoPort = bindAddr.getPort(); - httpServer = new HttpServer("journal", bindAddr.getHostName(), - tmpInfoPort, tmpInfoPort == 0, conf, new AccessControlList(conf - .get(DFS_ADMIN, " "))) { - { - if (UserGroupInformation.isSecurityEnabled()) { - initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY, - DFSUtil.getSpnegoKeytabKey(conf, DFS_JOURNALNODE_KEYTAB_FILE_KEY)); - } - } - }; + httpServer = new HttpServer.Builder().setName("journal") + .setBindAddress(bindAddr.getHostName()).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf).setACL( + new AccessControlList(conf.get(DFS_ADMIN, " "))) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) + .setUsernameConfKey( + DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf, + DFS_JOURNALNODE_KEYTAB_FILE_KEY)).build(); httpServer.setAttribute(JN_ATTRIBUTE_KEY, localJournalNode); httpServer.setAttribute(JspHelper.CURRENT_CONF, conf); httpServer.addInternalServlet("getJournal", "/getJournal", diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 9a057830eaa..b86a5caebd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -385,11 +385,15 @@ public class DataNode extends Configured String infoHost = infoSocAddr.getHostName(); int tmpInfoPort = infoSocAddr.getPort(); this.infoServer = (secureResources == null) - ? new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, - conf, new AccessControlList(conf.get(DFS_ADMIN, " "))) - : new HttpServer("datanode", infoHost, tmpInfoPort, tmpInfoPort == 0, - conf, new AccessControlList(conf.get(DFS_ADMIN, " ")), - secureResources.getListener()); + ? 
new HttpServer.Builder().setName("datanode") + .setBindAddress(infoHost).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf) + .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))).build() + : new HttpServer.Builder().setName("datanode") + .setBindAddress(infoHost).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf) + .setACL(new AccessControlList(conf.get(DFS_ADMIN, " "))) + .setConnector(secureResources.getListener()).build(); LOG.info("Opened info server at " + infoHost + ":" + tmpInfoPort); if (conf.getBoolean(DFS_HTTPS_ENABLE_KEY, false)) { boolean needClientAuth = conf.getBoolean(DFS_CLIENT_HTTPS_NEED_AUTH_KEY, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java index 93726b2a0b0..b645c9a0d47 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.server.namenode; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN; + import java.io.IOException; import java.net.InetSocketAddress; import java.util.HashMap; @@ -70,66 +71,31 @@ public class NameNodeHttpServer { public void start() throws IOException { final String infoHost = bindAddress.getHostName(); int infoPort = bindAddress.getPort(); + httpServer = new HttpServer.Builder().setName("hdfs") + .setBindAddress(infoHost).setPort(infoPort) + .setFindPort(infoPort == 0).setConf(conf).setACL( + new AccessControlList(conf.get(DFS_ADMIN, " "))) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) + .setUsernameConfKey( + DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf, + DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)).build(); + if (WebHdfsFileSystem.isEnabled(conf, HttpServer.LOG)) { + //add SPNEGO authentication filter for webhdfs + final String name = "SPNEGO"; + final String classname = AuthFilter.class.getName(); + final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; + Map params = getAuthFilterParams(conf); + httpServer.defineFilter(httpServer.getWebAppContext(), name, classname, params, + new String[]{pathSpec}); + HttpServer.LOG.info("Added filter '" + name + "' (class=" + classname + ")"); - httpServer = new HttpServer("hdfs", infoHost, infoPort, - infoPort == 0, conf, - new AccessControlList(conf.get(DFS_ADMIN, " "))) { - { - // Add SPNEGO support to NameNode - if (UserGroupInformation.isSecurityEnabled()) { - initSpnego(conf, - DFSConfigKeys.DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, - DFSUtil.getSpnegoKeytabKey(conf, - DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); - } - if (WebHdfsFileSystem.isEnabled(conf, LOG)) { - //add SPNEGO authentication filter for webhdfs - final String name = "SPNEGO"; - final String classname = AuthFilter.class.getName(); - final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*"; - Map params = getAuthFilterParams(conf); - defineFilter(webAppContext, name, classname, params, - new String[]{pathSpec}); - LOG.info("Added filter '" + name + "' (class=" + classname + ")"); - - // add webhdfs packages - addJerseyResourcePackage( - NamenodeWebHdfsMethods.class.getPackage().getName() - + ";" + Param.class.getPackage().getName(), pathSpec); - } + // add webhdfs packages + 
httpServer.addJerseyResourcePackage( + NamenodeWebHdfsMethods.class.getPackage().getName() + + ";" + Param.class.getPackage().getName(), pathSpec); } - private Map getAuthFilterParams(Configuration conf) - throws IOException { - Map params = new HashMap(); - String principalInConf = conf - .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); - if (principalInConf != null && !principalInConf.isEmpty()) { - params - .put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, - SecurityUtil.getServerPrincipal(principalInConf, - bindAddress.getHostName())); - } else if (UserGroupInformation.isSecurityEnabled()) { - LOG.error("WebHDFS and security are enabled, but configuration property '" + - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY + - "' is not set."); - } - String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf, - DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); - if (httpKeytab != null && !httpKeytab.isEmpty()) { - params.put( - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, - httpKeytab); - } else if (UserGroupInformation.isSecurityEnabled()) { - LOG.error("WebHDFS and security are enabled, but configuration property '" + - DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY + - "' is not set."); - } - return params; - } - }; - boolean certSSL = conf.getBoolean(DFSConfigKeys.DFS_HTTPS_ENABLE_KEY, false); if (certSSL) { boolean needClientAuth = conf.getBoolean("dfs.https.need.client.auth", false); @@ -153,6 +119,38 @@ public class NameNodeHttpServer { httpServer.start(); httpAddress = new InetSocketAddress(bindAddress.getAddress(), httpServer.getPort()); } + + private Map getAuthFilterParams(Configuration conf) + throws IOException { + Map params = new HashMap(); + String principalInConf = conf + .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY); + if (principalInConf != null && !principalInConf.isEmpty()) { + params + .put( + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY, + SecurityUtil.getServerPrincipal(principalInConf, + bindAddress.getHostName())); + } else if (UserGroupInformation.isSecurityEnabled()) { + HttpServer.LOG.error( + "WebHDFS and security are enabled, but configuration property '" + + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY + + "' is not set."); + } + String httpKeytab = conf.get(DFSUtil.getSpnegoKeytabKey(conf, + DFSConfigKeys.DFS_NAMENODE_KEYTAB_FILE_KEY)); + if (httpKeytab != null && !httpKeytab.isEmpty()) { + params.put( + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY, + httpKeytab); + } else if (UserGroupInformation.isSecurityEnabled()) { + HttpServer.LOG.error( + "WebHDFS and security are enabled, but configuration property '" + + DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY + + "' is not set."); + } + return params; + } public void stop() throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 47f7222b1b6..844c77f1cfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -256,19 +256,15 @@ public class SecondaryNameNode implements Runnable { // initialize the webserver for uploading files. 
int tmpInfoPort = infoSocAddr.getPort(); - infoServer = new HttpServer("secondary", infoBindAddress, tmpInfoPort, - tmpInfoPort == 0, conf, - new AccessControlList(conf.get(DFS_ADMIN, " "))) { - { - if (UserGroupInformation.isSecurityEnabled()) { - initSpnego( - conf, - DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, - DFSUtil.getSpnegoKeytabKey(conf, - DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)); - } - } - }; + infoServer = new HttpServer.Builder().setName("secondary") + .setBindAddress(infoBindAddress).setPort(tmpInfoPort) + .setFindPort(tmpInfoPort == 0).setConf(conf).setACL( + new AccessControlList(conf.get(DFS_ADMIN, " "))) + .setSecurityEnabled(UserGroupInformation.isSecurityEnabled()) + .setUsernameConfKey( + DFSConfigKeys.DFS_SECONDARY_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY) + .setKeytabConfKey(DFSUtil.getSpnegoKeytabKey(conf, + DFSConfigKeys.DFS_SECONDARY_NAMENODE_KEYTAB_FILE_KEY)).build(); infoServer.setAttribute("secondary.name.node", this); infoServer.setAttribute("name.system.image", checkpointImage); infoServer.setAttribute(JspHelper.CURRENT_CONF, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java index ff0dff7cc31..c3497064c8a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileInputStream.java @@ -42,7 +42,8 @@ public class TestEditLogFileInputStream { @Test public void testReadURL() throws Exception { // Start a simple web server which hosts the log data. 
- HttpServer server = new HttpServer("test", "0.0.0.0", 0, true); + HttpServer server = new HttpServer.Builder().setName("test") + .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); server.start(); try { server.addServlet("fakeLog", "/fakeLog", FakeLogServlet.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java index 84905dadf5e..9e7ffc18003 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestJobEndNotifier.java @@ -102,7 +102,8 @@ public class TestJobEndNotifier extends TestCase { public void setUp() throws Exception { new File(System.getProperty("build.webapps", "build/webapps") + "/test" ).mkdirs(); - server = new HttpServer("test", "0.0.0.0", 0, true); + server = new HttpServer.Builder().setName("test") + .setBindAddress("0.0.0.0").setPort(0).setFindPort(true).build(); server.addServlet("delay", "/delay", DelayServlet.class); server.addServlet("jobend", "/jobend", JobEndServlet.class); server.addServlet("fail", "/fail", FailServlet.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java index 6b7e42fd0fc..76568d326b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java @@ -87,8 +87,9 @@ public class WebAppProxy extends AbstractService { @Override protected void serviceStart() throws Exception { try { - proxyServer = new HttpServer("proxy", bindAddress, port, - port == 0, getConfig(), acl); + proxyServer = new HttpServer.Builder().setName("proxy") + .setBindAddress(bindAddress).setPort(port).setFindPort(port == 0) + .setConf(getConfig()).setACL(acl).build(); proxyServer.addServlet(ProxyUriUtils.PROXY_SERVLET_NAME, ProxyUriUtils.PROXY_PATH_SPEC, WebAppProxyServlet.class); proxyServer.setAttribute(FETCHER_ATTRIBUTE, fetcher); From 270eaceba8a3c113ab9c98b80f8105c3fd28f852 Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Wed, 21 Aug 2013 17:54:08 +0000 Subject: [PATCH 047/153] HDFS-5069 Include hadoop-nfs and hadoop-hdfs-nfs into hadoop dist for NFS deployment. 
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516232 13f79535-47bb-0310-9956-ffa450edef68 --- .../assemblies/hadoop-hdfs-nfs-dist.xml | 48 +++++++++++++++++++ .../resources/assemblies/hadoop-nfs-dist.xml | 48 +++++++++++++++++++ hadoop-common-project/hadoop-nfs/pom.xml | 46 ++++++++++++++++++ hadoop-dist/pom.xml | 2 + hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml | 44 +++++++++++++++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ 6 files changed, 191 insertions(+) create mode 100644 hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml create mode 100644 hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml new file mode 100644 index 00000000000..89e8771d809 --- /dev/null +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml @@ -0,0 +1,48 @@ + + + + hadoop-hdfs-nfs-dist + + dir + + false + + + target + /share/hadoop/hdfs + + ${project.artifactId}-${project.version}.jar + + + + + + + false + /share/hadoop/hdfs/lib + + + org.apache.hadoop:hadoop-common + org.apache.hadoop:hadoop-hdfs + + org.slf4j:slf4j-api + org.slf4j:slf4j-log4j12 + org.hsqldb:hsqldb + + + + + + diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml new file mode 100644 index 00000000000..927123985ed --- /dev/null +++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml @@ -0,0 +1,48 @@ + + + + hadoop-nfs-dist + + dir + + false + + + target + /share/hadoop/common + + ${project.artifactId}-${project.version}.jar + + + + + + + false + /share/hadoop/common/lib + + + org.apache.hadoop:hadoop-common + org.apache.hadoop:hadoop-hdfs + + org.slf4j:slf4j-api + org.slf4j:slf4j-log4j12 + org.hsqldb:hsqldb + + + + + + diff --git a/hadoop-common-project/hadoop-nfs/pom.xml b/hadoop-common-project/hadoop-nfs/pom.xml index 5b873052085..468c84d9ebc 100644 --- a/hadoop-common-project/hadoop-nfs/pom.xml +++ b/hadoop-common-project/hadoop-nfs/pom.xml @@ -95,4 +95,50 @@ 11.0.2
+ + + + + dist + + false + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + org.apache.hadoop + hadoop-assemblies + ${project.version} + + + + + dist + package + + single + + + ${project.artifactId}-${project.version} + false + false + + + ../../hadoop-assemblies/src/main/resources/assemblies/hadoop-nfs-dist.xml + + + + + + + + + + diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index 5f063e91212..6a524fc4ba5 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -115,8 +115,10 @@ run mkdir hadoop-${project.version} run cd hadoop-${project.version} run cp -r $ROOT/hadoop-common-project/hadoop-common/target/hadoop-common-${project.version}/* . + run cp -r $ROOT/hadoop-common-project/hadoop-nfs/target/hadoop-nfs-${project.version}/* . run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-hdfs-${project.version}/* . run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-httpfs/target/hadoop-hdfs-httpfs-${project.version}/* . + run cp -r $ROOT/hadoop-hdfs-project/hadoop-hdfs-nfs/target/hadoop-hdfs-nfs-${project.version}/* . run cp -r $ROOT/hadoop-yarn-project/target/hadoop-yarn-project-${project.version}/* . run cp -r $ROOT/hadoop-mapreduce-project/target/hadoop-mapreduce-${project.version}/* . run cp -r $ROOT/hadoop-tools/hadoop-tools-dist/target/hadoop-tools-dist-${project.version}/* . diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml index 945bb658385..9577fe180f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml @@ -192,4 +192,48 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + + dist + + false + + + + + org.apache.maven.plugins + maven-assembly-plugin + + + org.apache.hadoop + hadoop-assemblies + ${project.version} + + + + + dist + package + + single + + + ${project.artifactId}-${project.version} + false + false + + + ../../hadoop-assemblies/src/main/resources/assemblies/hadoop-hdfs-nfs-dist.xml + + + + + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 6a6e094a526..5739532017f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -299,6 +299,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5110 Change FSDataOutputStream to HdfsDataOutputStream for opened streams to fix type cast error. (brandonli) + HDFS-5069 Include hadoop-nfs and hadoop-hdfs-nfs into hadoop dist for + NFS deployment (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 2499a86664103eb2d16bd53bf424446599b61820 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Wed, 21 Aug 2013 18:15:26 +0000 Subject: [PATCH 048/153] HDFS-4994. Audit log getContentSummary() calls. Contributed by Robert Parker. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516237 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../hdfs/server/namenode/FSNamesystem.java | 23 +++++++++++++++++-- 2 files changed, 23 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 5739532017f..02a8cc27cae 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -262,6 +262,8 @@ Release 2.3.0 - UNRELEASED HDFS-5068. Convert NNThroughputBenchmark to a Tool to allow generic options. (shv) + HDFS-4994. Audit log getContentSummary() calls. 
(Robert Parker via kihwal) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index f15fe4fe388..030893028c4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -3415,12 +3415,26 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return true; } - ContentSummary getContentSummary(String src) throws AccessControlException, - FileNotFoundException, UnresolvedLinkException, StandbyException { + /** + * Get the content summary for a specific file/dir. + * + * @param src The string representation of the path to the file + * + * @throws AccessControlException if access is denied + * @throws UnresolvedLinkException if a symlink is encountered. + * @throws FileNotFoundException if no file exists + * @throws StandbyException + * @throws IOException for issues with writing to the audit log + * + * @return object containing information regarding the file + * or null if file not found + */ + ContentSummary getContentSummary(String src) throws IOException { FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.READ); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); readLock(); + boolean success = true; try { checkOperation(OperationCategory.READ); src = FSDirectory.resolvePath(src, pathComponents, dir); @@ -3428,8 +3442,13 @@ public class FSNamesystem implements Namesystem, FSClusterStats, checkPermission(pc, src, false, null, null, null, FsAction.READ_EXECUTE); } return dir.getContentSummary(src); + + } catch (AccessControlException ace) { + success = false; + throw ace; } finally { readUnlock(); + logAuditEvent(success, "contentSummary", src); } } From 2d614a916cc5958b709bddbee71d2dcb9cbb2bf9 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Wed, 21 Aug 2013 18:16:47 +0000 Subject: [PATCH 049/153] MAPREDUCE-5466. Changed MR AM to not promote history files of intermediate AMs in case they are exiting because of errors and thus help history-server pick up the right history file for the last successful AM. Contributed by Jian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516238 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 5 + .../jobhistory/JobHistoryEventHandler.java | 32 ++++- .../hadoop/mapreduce/v2/app/AppContext.java | 2 + .../hadoop/mapreduce/v2/app/MRAppMaster.java | 5 + .../mapreduce/v2/app/rm/RMCommunicator.java | 13 ++ .../TestJobHistoryEventHandler.java | 117 +++++++++++++++++- .../mapreduce/v2/app/MockAppContext.java | 5 + .../v2/app/TestRuntimeEstimators.java | 5 + .../src/main/avro/Events.avpr | 1 + .../mapreduce/jobhistory/EventReader.java | 2 + .../jobhistory/JobHistoryParser.java | 1 + .../JobUnsuccessfulCompletionEvent.java | 2 + .../hadoop/mapreduce/v2/hs/JobHistory.java | 6 + 13 files changed, 190 insertions(+), 6 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 00848a2ddc2..c73ab8b8db8 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -226,6 +226,11 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5001. 
LocalJobRunner has race condition resulting in job failures (Sandy Ryza via jlowe) + MAPREDUCE-5466. Changed MR AM to not promote history files of intermediate + AMs in case they are exiting because of errors and thus help history-server + pick up the right history file for the last successful AM. (Jian He via + vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index d3d3d0fa015..eef45e7be8a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -520,7 +520,7 @@ public class JobHistoryEventHandler extends AbstractService mi.getJobIndexInfo().setSubmitTime(jobSubmittedEvent.getSubmitTime()); mi.getJobIndexInfo().setQueueName(jobSubmittedEvent.getJobQueueName()); } - + // If this is JobFinishedEvent, close the writer and setup the job-index if (event.getHistoryEvent().getEventType() == EventType.JOB_FINISHED) { try { @@ -532,6 +532,24 @@ public class JobHistoryEventHandler extends AbstractService jFinishedEvent.getFinishedReduces()); mi.getJobIndexInfo().setJobStatus(JobState.SUCCEEDED.toString()); closeEventWriter(event.getJobID()); + processDoneFiles(event.getJobID()); + } catch (IOException e) { + throw new YarnRuntimeException(e); + } + } + // In case of JOB_ERROR, only process all the Done files(e.g. job + // summary, job history file etc.) if it is last AM retry. 
+ if (event.getHistoryEvent().getEventType() == EventType.JOB_ERROR) { + try { + JobUnsuccessfulCompletionEvent jucEvent = + (JobUnsuccessfulCompletionEvent) event.getHistoryEvent(); + mi.getJobIndexInfo().setFinishTime(jucEvent.getFinishTime()); + mi.getJobIndexInfo().setNumMaps(jucEvent.getFinishedMaps()); + mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces()); + mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus()); + closeEventWriter(event.getJobID()); + if(context.isLastAMRetry()) + processDoneFiles(event.getJobID()); } catch (IOException e) { throw new YarnRuntimeException(e); } @@ -548,6 +566,7 @@ public class JobHistoryEventHandler extends AbstractService mi.getJobIndexInfo().setNumReduces(jucEvent.getFinishedReduces()); mi.getJobIndexInfo().setJobStatus(jucEvent.getStatus()); closeEventWriter(event.getJobID()); + processDoneFiles(event.getJobID()); } catch (IOException e) { throw new YarnRuntimeException(e); } @@ -634,7 +653,6 @@ public class JobHistoryEventHandler extends AbstractService } protected void closeEventWriter(JobId jobId) throws IOException { - final MetaInfo mi = fileMap.get(jobId); if (mi == null) { throw new IOException("No MetaInfo found for JobId: [" + jobId + "]"); @@ -654,7 +672,15 @@ public class JobHistoryEventHandler extends AbstractService LOG.error("Error closing writer for JobID: " + jobId); throw e; } - + } + + protected void processDoneFiles(JobId jobId) throws IOException { + + final MetaInfo mi = fileMap.get(jobId); + if (mi == null) { + throw new IOException("No MetaInfo found for JobId: [" + jobId + "]"); + } + if (mi.getHistoryFile() == null) { LOG.warn("No file for job-history with " + jobId + " found in cache!"); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java index 946d9c62c42..885534313b0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/AppContext.java @@ -61,4 +61,6 @@ public interface AppContext { Set getBlacklistedNodes(); ClientToAMTokenSecretManager getClientToAMTokenSecretManager(); + + boolean isLastAMRetry(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 8abd58d5ee6..d8ddb2c29f1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -952,6 +952,11 @@ public class MRAppMaster extends CompositeService { public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() { return clientToAMTokenSecretManager; } + + @Override + public boolean isLastAMRetry(){ + return isLastAMRetry; + } } @SuppressWarnings("unchecked") diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java index 187a6e06205..ad1c9f1b506 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java @@ -36,9 +36,12 @@ import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; +import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; @@ -223,6 +226,7 @@ public abstract class RMCommunicator extends AbstractService protected void startAllocatorThread() { allocatorThread = new Thread(new Runnable() { + @SuppressWarnings("unchecked") @Override public void run() { while (!stopped.get() && !Thread.currentThread().isInterrupted()) { @@ -233,6 +237,15 @@ public abstract class RMCommunicator extends AbstractService } catch (YarnRuntimeException e) { LOG.error("Error communicating with RM: " + e.getMessage() , e); return; + } catch (InvalidToken e) { + // This can happen if the RM has been restarted, since currently + // when RM restarts AMRMToken is not populated back to + // AMRMTokenSecretManager yet. Once this is fixed, no need + // to send JOB_AM_REBOOT event in this method any more. + eventHandler.handle(new JobEvent(job.getID(), + JobEventType.JOB_AM_REBOOT)); + LOG.error("Error in authencating with RM: " ,e); + return; } catch (Exception e) { LOG.error("ERROR IN CONTACTING RM. 
", e); continue; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java index c25cf4963d2..7964b9c8151 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/jobhistory/TestJobHistoryEventHandler.java @@ -25,10 +25,13 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.never; import java.io.File; import java.io.IOException; +import junit.framework.Assert; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; @@ -43,6 +46,7 @@ import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.app.AppContext; import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -229,6 +233,98 @@ public class TestJobHistoryEventHandler { } } + // In case of all types of events, process Done files if it's last AM retry + @Test (timeout=50000) + public void testProcessDoneFilesOnLastAMRetry() throws Exception { + TestParams t = new TestParams(true); + Configuration conf = new Configuration(); + + JHEvenHandlerForTest realJheh = + new JHEvenHandlerForTest(t.mockAppContext, 0); + JHEvenHandlerForTest jheh = spy(realJheh); + jheh.init(conf); + + EventWriter mockWriter = null; + try { + jheh.start(); + handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent( + t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000))); + verify(jheh, times(0)).processDoneFiles(any(JobId.class)); + + handleEvent(jheh, new JobHistoryEvent(t.jobId, + new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, + 0, 0, JobStateInternal.ERROR.toString()))); + verify(jheh, times(1)).processDoneFiles(any(JobId.class)); + + handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent( + TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), + new Counters(), new Counters()))); + verify(jheh, times(2)).processDoneFiles(any(JobId.class)); + + handleEvent(jheh, new JobHistoryEvent(t.jobId, + new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, + 0, 0, JobStateInternal.FAILED.toString()))); + verify(jheh, times(3)).processDoneFiles(any(JobId.class)); + + handleEvent(jheh, new JobHistoryEvent(t.jobId, + new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, + 0, 0, JobStateInternal.KILLED.toString()))); + verify(jheh, times(4)).processDoneFiles(any(JobId.class)); + + mockWriter = jheh.getEventWriter(); + verify(mockWriter, times(5)).write(any(HistoryEvent.class)); + } finally { + jheh.stop(); + verify(mockWriter).close(); + } + } + + // Skip processing Done files in case of ERROR, if it's not last AM retry + @Test 
(timeout=50000) + public void testProcessDoneFilesNotLastAMRetry() throws Exception { + TestParams t = new TestParams(false); + Configuration conf = new Configuration(); + JHEvenHandlerForTest realJheh = + new JHEvenHandlerForTest(t.mockAppContext, 0); + JHEvenHandlerForTest jheh = spy(realJheh); + jheh.init(conf); + + EventWriter mockWriter = null; + try { + jheh.start(); + handleEvent(jheh, new JobHistoryEvent(t.jobId, new AMStartedEvent( + t.appAttemptId, 200, t.containerId, "nmhost", 3000, 4000))); + verify(jheh, times(0)).processDoneFiles(t.jobId); + + // skip processing done files + handleEvent(jheh, new JobHistoryEvent(t.jobId, + new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, + 0, 0, JobStateInternal.ERROR.toString()))); + verify(jheh, times(0)).processDoneFiles(t.jobId); + + handleEvent(jheh, new JobHistoryEvent(t.jobId, new JobFinishedEvent( + TypeConverter.fromYarn(t.jobId), 0, 0, 0, 0, 0, new Counters(), + new Counters(), new Counters()))); + verify(jheh, times(1)).processDoneFiles(t.jobId); + + handleEvent(jheh, new JobHistoryEvent(t.jobId, + new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, + 0, 0, JobStateInternal.FAILED.toString()))); + verify(jheh, times(2)).processDoneFiles(t.jobId); + + handleEvent(jheh, new JobHistoryEvent(t.jobId, + new JobUnsuccessfulCompletionEvent(TypeConverter.fromYarn(t.jobId), 0, + 0, 0, JobStateInternal.KILLED.toString()))); + verify(jheh, times(3)).processDoneFiles(t.jobId); + + mockWriter = jheh.getEventWriter(); + verify(mockWriter, times(5)).write(any(HistoryEvent.class)); + } finally { + jheh.stop(); + verify(mockWriter).close(); + } + } + private void queueEvent(JHEvenHandlerForTest jheh, JobHistoryEvent event) { jheh.handle(event); } @@ -258,20 +354,23 @@ public class TestJobHistoryEventHandler { } } - private AppContext mockAppContext(ApplicationId appId) { + private AppContext mockAppContext(ApplicationId appId, boolean isLastAMRetry) { JobId jobId = TypeConverter.toYarn(TypeConverter.fromYarn(appId)); AppContext mockContext = mock(AppContext.class); Job mockJob = mock(Job.class); + when(mockJob.getAllCounters()).thenReturn(new Counters()); when(mockJob.getTotalMaps()).thenReturn(10); when(mockJob.getTotalReduces()).thenReturn(10); when(mockJob.getName()).thenReturn("mockjob"); when(mockContext.getJob(jobId)).thenReturn(mockJob); when(mockContext.getApplicationID()).thenReturn(appId); + when(mockContext.isLastAMRetry()).thenReturn(isLastAMRetry); return mockContext; } private class TestParams { + boolean isLastAMRetry; String workDir = setupTestWorkDir(); ApplicationId appId = ApplicationId.newInstance(200, 1); ApplicationAttemptId appAttemptId = @@ -279,7 +378,15 @@ public class TestJobHistoryEventHandler { ContainerId containerId = ContainerId.newInstance(appAttemptId, 1); TaskID taskID = TaskID.forName("task_200707121733_0003_m_000005"); JobId jobId = MRBuilderUtils.newJobId(appId, 1); - AppContext mockAppContext = mockAppContext(appId); + AppContext mockAppContext; + + public TestParams() { + this(false); + } + public TestParams(boolean isLastAMRetry) { + this.isLastAMRetry = isLastAMRetry; + mockAppContext = mockAppContext(appId, this.isLastAMRetry); + } } private JobHistoryEvent getEventToEnqueue(JobId jobId) { @@ -344,7 +451,6 @@ public class TestJobHistoryEventHandler { class JHEvenHandlerForTest extends JobHistoryEventHandler { private EventWriter eventWriter; - public JHEvenHandlerForTest(AppContext context, int startCount) { super(context, startCount); } @@ -367,6 +473,11 @@ class 
JHEvenHandlerForTest extends JobHistoryEventHandler { public EventWriter getEventWriter() { return this.eventWriter; } + + @Override + protected void processDoneFiles(JobId jobId){ + // do nothing + } } /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java index 4b07236705b..02a3209f594 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockAppContext.java @@ -130,4 +130,9 @@ public class MockAppContext implements AppContext { // Not implemented return null; } + + @Override + public boolean isLastAMRetry() { + return false; + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index 762dd572f3a..8ba57bbc248 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -862,5 +862,10 @@ public class TestRuntimeEstimators { public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() { return null; } + + @Override + public boolean isLastAMRetry() { + return false; + } } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr index b78fc80584d..64ecf376598 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr @@ -269,6 +269,7 @@ "JOB_STATUS_CHANGED", "JOB_FAILED", "JOB_KILLED", + "JOB_ERROR", "JOB_INFO_CHANGED", "TASK_STARTED", "TASK_FINISHED", diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java index 68eac638eff..37052b42c0f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java @@ -104,6 +104,8 @@ public class EventReader implements Closeable { result = new JobUnsuccessfulCompletionEvent(); break; case JOB_KILLED: result = new JobUnsuccessfulCompletionEvent(); break; + case JOB_ERROR: + result = new JobUnsuccessfulCompletionEvent(); break; case JOB_INFO_CHANGED: result = new JobInfoChangeEvent(); break; case TASK_STARTED: 
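The new JOB_ERROR type is derived from the event's status string. A minimal sketch of how the mapping behaves after this change (the job id is made up; the constructor arguments mirror the test calls above):

    import org.apache.hadoop.mapreduce.JobID;
    import org.apache.hadoop.mapreduce.jobhistory.EventType;
    import org.apache.hadoop.mapreduce.jobhistory.JobUnsuccessfulCompletionEvent;

    public class JobErrorEventSketch {
      public static void main(String[] args) {
        JobID jobId = new JobID("200707121733", 3);   // hypothetical job id
        // (jobId, finishTime, finishedMaps, finishedReduces, status)
        JobUnsuccessfulCompletionEvent event =
            new JobUnsuccessfulCompletionEvent(jobId, 0L, 0, 0, "ERROR");
        // An "ERROR" status used to fall through to JOB_KILLED; it now reports
        // JOB_ERROR, which EventReader above and JobHistoryParser below handle
        // alongside JOB_FAILED and JOB_KILLED.
        System.out.println(event.getEventType() == EventType.JOB_ERROR);  // true
      }
    }
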
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java index 3f8fb545298..d6835050228 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java @@ -185,6 +185,7 @@ public class JobHistoryParser implements HistoryEventHandler { break; case JOB_FAILED: case JOB_KILLED: + case JOB_ERROR: handleJobFailedEvent((JobUnsuccessfulCompletionEvent) event); break; case JOB_FINISHED: diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java index a1c374f522b..3adb91f2a8d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java @@ -72,6 +72,8 @@ public class JobUnsuccessfulCompletionEvent implements HistoryEvent { public EventType getEventType() { if ("FAILED".equals(getStatus())) { return EventType.JOB_FAILED; + } else if ("ERROR".equals(getStatus())) { + return EventType.JOB_ERROR; } else return EventType.JOB_KILLED; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java index da9bd3295f7..4ca4786e9d4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java @@ -381,4 +381,10 @@ public class JobHistory extends AbstractService implements HistoryContext { // Not implemented. return null; } + + @Override + public boolean isLastAMRetry() { + // bogus - Not Required + return false; + } } From 42a2846b3b824af808d7fd190f7dd4ea1ee10cbb Mon Sep 17 00:00:00 2001 From: Bikas Saha Date: Wed, 21 Aug 2013 23:33:59 +0000 Subject: [PATCH 050/153] YARN-881. Priority#compareTo method seems to be wrong. 
(Jian He via bikas) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516331 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/yarn/api/records/Priority.java | 2 +- .../resourcemanager/scheduler/fair/FairScheduler.java | 4 ++-- .../resourcemanager/scheduler/TestSchedulerUtils.java | 7 +++++++ 4 files changed, 12 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 6101901f12a..3670ab84ed9 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -80,6 +80,8 @@ Release 2.1.1-beta - UNRELEASED YARN-1006. Fixed broken rendering in the Nodes list web page on the RM web UI. (Xuan Gong via vinodkv) + YARN-881. Priority#compareTo method seems to be wrong. (Jian He via bikas) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java index 1e3d2290538..c522a63e5d8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Priority.java @@ -81,7 +81,7 @@ public abstract class Priority implements Comparable { @Override public int compareTo(Priority other) { - return this.getPriority() - other.getPriority(); + return other.getPriority() - this.getPriority(); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 16b543cdec1..b86b031ecf6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -385,8 +385,8 @@ public class FairScheduler implements ResourceScheduler { // Sort containers into reverse order of priority Collections.sort(runningContainers, new Comparator() { public int compare(RMContainer c1, RMContainer c2) { - int ret = c2.getContainer().getPriority().compareTo( - c1.getContainer().getPriority()); + int ret = c1.getContainer().getPriority().compareTo( + c2.getContainer().getPriority()); if (ret == 0) { return c2.getContainerId().compareTo(c1.getContainerId()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index 0f57d1b2bd5..9661057d133 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; @@ -351,4 +352,10 @@ public class TestSchedulerUtils { RMAppAttemptState.LAUNCHED); } + @Test + public void testComparePriorities(){ + Priority high = Priority.newInstance(1); + Priority low = Priority.newInstance(2); + assertTrue(high.compareTo(low) > 0); + } } From ad0011a3405fee6c6e45996d39557e0f8d420633 Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Wed, 21 Aug 2013 23:55:04 +0000 Subject: [PATCH 051/153] YARN-1082. Create base directories on HDFS after RM login to ensure RM recovery doesn't fail in secure mode. Contributed by Vinod K V. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516337 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../server/resourcemanager/RMContextImpl.java | 2 +- .../resourcemanager/ResourceManager.java | 17 ++++++---- .../recovery/FileSystemRMStateStore.java | 12 +++++-- .../recovery/MemoryRMStateStore.java | 6 +++- .../recovery/NullRMStateStore.java | 9 ++++-- .../recovery/RMStateStore.java | 32 +++++++++++++------ .../recovery/TestRMStateStore.java | 7 ++-- .../scheduler/fair/TestFairScheduler.java | 11 ++++--- 9 files changed, 70 insertions(+), 29 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 3670ab84ed9..04ad1a27a00 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -82,6 +82,9 @@ Release 2.1.1-beta - UNRELEASED YARN-881. Priority#compareTo method seems to be wrong. (Jian He via bikas) + YARN-1082. Create base directories on HDFS after RM login to ensure RM + recovery doesn't fail in secure mode. 
(vinodkv via acmurthy) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index 1151d77ec16..368f9c49c8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -100,7 +100,7 @@ public class RMContextImpl implements RMContext { containerTokenSecretManager, nmTokenSecretManager, clientToAMTokenSecretManager); RMStateStore nullStore = new NullRMStateStore(); - nullStore.setDispatcher(rmDispatcher); + nullStore.setRMDispatcher(rmDispatcher); try { nullStore.init(new YarnConfiguration()); setStateStore(nullStore); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 841f387e7d9..a4fb30f85cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -67,18 +67,18 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ContainerPreemptEventType; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.PreemptableResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEventType; import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; +import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; -import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.webapp.RMWebApp; import 
org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.webproxy.AppReportFetcher; @@ -186,16 +186,17 @@ public class ResourceManager extends CompositeService implements Recoverable { recoveryEnabled = false; rmStore = new NullRMStateStore(); } + try { rmStore.init(conf); - rmStore.setDispatcher(rmDispatcher); + rmStore.setRMDispatcher(rmDispatcher); } catch (Exception e) { // the Exception from stateStore.init() needs to be handled for // HA and we need to give up master status if we got fenced LOG.error("Failed to init state store", e); ExitUtil.terminate(1, e); } - + this.rmContext = new RMContextImpl(this.rmDispatcher, rmStore, this.containerAllocationExpirer, amLivelinessMonitor, @@ -275,7 +276,7 @@ public class ResourceManager extends CompositeService implements Recoverable { @VisibleForTesting protected void setRMStateStore(RMStateStore rmStore) { - rmStore.setDispatcher(rmDispatcher); + rmStore.setRMDispatcher(rmDispatcher); ((RMContextImpl) rmContext).setStateStore(rmStore); } @@ -601,9 +602,13 @@ public class ResourceManager extends CompositeService implements Recoverable { this.containerTokenSecretManager.start(); this.nmTokenSecretManager.start(); + RMStateStore rmStore = rmContext.getStateStore(); + // The state store needs to start irrespective of recoveryEnabled as apps + // need events to move to further states. + rmStore.start(); + if(recoveryEnabled) { try { - RMStateStore rmStore = rmContext.getStateStore(); RMState state = rmStore.loadState(); recover(state); } catch (Exception e) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index a5262b001fd..de1f65a8016 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -70,7 +70,7 @@ public class FileSystemRMStateStore extends RMStateStore { private static final String DELEGATION_TOKEN_SEQUENCE_NUMBER_PREFIX = "RMDTSequenceNumber_"; - private FileSystem fs; + protected FileSystem fs; private Path rootDirPath; private Path rmDTSecretManagerRoot; @@ -80,6 +80,7 @@ public class FileSystemRMStateStore extends RMStateStore { @VisibleForTesting Path fsWorkingPath; + @Override public synchronized void initInternal(Configuration conf) throws Exception{ @@ -87,9 +88,14 @@ public class FileSystemRMStateStore extends RMStateStore { rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); rmDTSecretManagerRoot = new Path(rootDirPath, RM_DT_SECRET_MANAGER_ROOT); rmAppRoot = new Path(rootDirPath, RM_APP_ROOT); + } - // create filesystem - fs = fsWorkingPath.getFileSystem(conf); + @Override + protected void startInternal() throws Exception { + // create filesystem only now, as part of service-start. By this time, RM is + // authenticated with kerberos so we are good to create a file-system + // handle. 
+ fs = fsWorkingPath.getFileSystem(getConfig()); fs.mkdirs(rmDTSecretManagerRoot); fs.mkdirs(rmAppRoot); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index 0f18b159b9c..bdf4da38f0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -65,7 +65,11 @@ public class MemoryRMStateStore extends RMStateStore { @Override public synchronized void initInternal(Configuration conf) { } - + + @Override + protected synchronized void startInternal() throws Exception { + } + @Override protected synchronized void closeInternal() throws Exception { } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java index 4abd256731d..003346bb461 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java @@ -21,10 +21,10 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; @Unstable public class NullRMStateStore extends RMStateStore { @@ -34,6 +34,11 @@ public class NullRMStateStore extends RMStateStore { // Do nothing } + @Override + protected void startInternal() throws Exception { + // Do nothing + } + @Override protected void closeInternal() throws Exception { // Do nothing diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index 313898fcfa5..865e7260f44 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -34,6 +34,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.DelegationKey; +import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -60,9 +61,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAt * Real store implementations need to derive from it and implement blocking * store and load methods to actually store and load the state. */ -public abstract class RMStateStore { +public abstract class RMStateStore extends AbstractService { + public static final Log LOG = LogFactory.getLog(RMStateStore.class); + public RMStateStore() { + super(RMStateStore.class.getName()); + } + /** * State of an application attempt */ @@ -174,31 +180,39 @@ public abstract class RMStateStore { * Dispatcher used to send state operation completion events to * ResourceManager services */ - public void setDispatcher(Dispatcher dispatcher) { + public void setRMDispatcher(Dispatcher dispatcher) { this.rmDispatcher = dispatcher; } AsyncDispatcher dispatcher; - public synchronized void init(Configuration conf) throws Exception{ + public synchronized void serviceInit(Configuration conf) throws Exception{ // create async handler dispatcher = new AsyncDispatcher(); dispatcher.init(conf); dispatcher.register(RMStateStoreEventType.class, new ForwardingEventHandler()); - dispatcher.start(); - initInternal(conf); } + + protected synchronized void serviceStart() throws Exception { + dispatcher.start(); + startInternal(); + } /** * Derived classes initialize themselves using this method. - * The base class is initialized and the event dispatcher is ready to use at - * this point */ protected abstract void initInternal(Configuration conf) throws Exception; - - public synchronized void close() throws Exception { + + /** + * Derived classes start themselves using this method. 
+ * The base class is started and the event dispatcher is ready to use at + * this point + */ + protected abstract void startInternal() throws Exception; + + public synchronized void serviceStop() throws Exception { closeInternal(); dispatcher.stop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java index a24af257bd4..05916129e3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java @@ -129,7 +129,10 @@ public class TestRMStateStore { class TestFileSystemRMStore extends FileSystemRMStateStore { TestFileSystemRMStore(Configuration conf) throws Exception { init(conf); + Assert.assertNull(fs); assertTrue(workingDirPathURI.equals(fsWorkingPath)); + start(); + Assert.assertNotNull(fs); } } @@ -218,7 +221,7 @@ public class TestRMStateStore { Configuration conf = new YarnConfiguration(); RMStateStore store = stateStoreHelper.getRMStateStore(); TestDispatcher dispatcher = new TestDispatcher(); - store.setDispatcher(dispatcher); + store.setRMDispatcher(dispatcher); AMRMTokenSecretManager appTokenMgr = new AMRMTokenSecretManager(conf); @@ -327,7 +330,7 @@ public class TestRMStateStore { RMStateStoreHelper stateStoreHelper) throws Exception { RMStateStore store = stateStoreHelper.getRMStateStore(); TestDispatcher dispatcher = new TestDispatcher(); - store.setDispatcher(dispatcher); + store.setRMDispatcher(dispatcher); // store RM delegation token; RMDelegationTokenIdentifier dtId1 = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 1d68338a026..12c40b9d264 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -39,12 +39,10 @@ import javax.xml.parsers.ParserConfigurationException; import junit.framework.Assert; -import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.MockApps; -import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; @@ -57,11 +55,11 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; import 
org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; -import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.MockRMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -81,8 +79,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemoved import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; -import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.resource.Resources; @@ -132,7 +128,12 @@ public class TestFairScheduler { conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false"); resourceManager = new ResourceManager(); resourceManager.init(conf); + + // TODO: This test should really be using MockRM. For now starting stuff + // that is needed at a bare minimum. ((AsyncDispatcher)resourceManager.getRMContext().getDispatcher()).start(); + resourceManager.getRMContext().getStateStore().start(); + scheduler.reinitialize(conf, resourceManager.getRMContext()); // to initialize the master key resourceManager.getRMContainerTokenSecretManager().rollMasterKey(); From 6fd8766a514e9240a63a4e5660a2fe1646dc609b Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Thu, 22 Aug 2013 01:06:57 +0000 Subject: [PATCH 052/153] HDFS-5045. Add more unit tests for retry cache to cover all AtMostOnce methods. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516348 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSClient.java | 3 +- .../namenode/ha/TestRetryCacheWithHA.java | 690 +++++++++++++++++- 3 files changed, 656 insertions(+), 40 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 02a8cc27cae..0fe0fd84009 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -327,6 +327,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5111. Remove duplicated error message for snapshot commands when processing invalid arguments. (jing9) + HDFS-5045. Add more unit tests for retry cache to cover all AtMostOnce + methods. 
(jing9) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 0aded40b073..511df17b472 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -596,7 +596,8 @@ public class DFSClient implements java.io.Closeable { return dfsClientConf.hdfsTimeout; } - String getClientName() { + @VisibleForTesting + public String getClientName() { return clientName; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java index 5e2a63203e9..dff44a0c89c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java @@ -26,15 +26,22 @@ import java.lang.reflect.Method; import java.lang.reflect.Proxy; import java.net.URI; import java.net.UnknownHostException; +import java.util.EnumSet; import java.util.HashMap; import java.util.Iterator; import java.util.Map; +import java.util.Random; import java.util.concurrent.atomic.AtomicBoolean; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CreateFlag; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSTestUtil; @@ -43,9 +50,20 @@ import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.MiniDFSNNTopology; import org.apache.hadoop.hdfs.NameNodeProxies; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream.SyncFlag; import org.apache.hadoop.hdfs.protocol.ClientProtocol; -import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.hdfs.protocol.DatanodeInfo; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; +import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; +import org.apache.hadoop.hdfs.protocol.LocatedBlock; +import org.apache.hadoop.hdfs.protocol.LocatedBlocks; +import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.namenode.INodeFile; +import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction; +import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper; +import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.retry.FailoverProxyProvider; import org.apache.hadoop.io.retry.RetryInvocationHandler; @@ -60,14 +78,13 @@ import org.junit.Test; public class TestRetryCacheWithHA { private static final Log LOG = LogFactory.getLog(TestRetryCacheWithHA.class); - private static MiniDFSCluster cluster; - private static DistributedFileSystem dfs; - private static 
Configuration conf = new HdfsConfiguration(); - private static final int BlockSize = 1024; private static final short DataNodes = 3; - private final static Map results = - new HashMap(); + private static final int CHECKTIMES = 10; + + private MiniDFSCluster cluster; + private DistributedFileSystem dfs; + private Configuration conf = new HdfsConfiguration(); /** * A dummy invocation handler extending RetryInvocationHandler. We can use @@ -120,7 +137,7 @@ public class TestRetryCacheWithHA { * 2. Trigger the NN failover * 3. Check the retry cache on the original standby NN */ - @Test + @Test (timeout=60000) public void testRetryCacheOnStandbyNN() throws Exception { // 1. run operations DFSTestUtil.runOperations(cluster, dfs, conf, BlockSize, 0); @@ -180,26 +197,624 @@ public class TestRetryCacheWithHA { return client; } + abstract class AtMostOnceOp { + private final String name; + final DFSClient client; + + AtMostOnceOp(String name, DFSClient client) { + this.name = name; + this.client = client; + } + + abstract void prepare() throws Exception; + abstract void invoke() throws Exception; + abstract boolean checkNamenodeBeforeReturn() throws Exception; + abstract Object getResult(); + } + + /** createSnapshot operaiton */ + class CreateSnapshotOp extends AtMostOnceOp { + private String snapshotPath; + private String dir; + private String snapshotName; + + CreateSnapshotOp(DFSClient client, String dir, String snapshotName) { + super("createSnapshot", client); + this.dir = dir; + this.snapshotName = snapshotName; + } + + @Override + void prepare() throws Exception { + final Path dirPath = new Path(dir); + if (!dfs.exists(dirPath)) { + dfs.mkdirs(dirPath); + dfs.allowSnapshot(dirPath); + } + } + + @Override + void invoke() throws Exception { + this.snapshotPath = client.createSnapshot(dir, snapshotName); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + final Path sPath = SnapshotTestHelper.getSnapshotRoot(new Path(dir), + snapshotName); + boolean snapshotCreated = dfs.exists(sPath); + for (int i = 0; i < CHECKTIMES && !snapshotCreated; i++) { + Thread.sleep(1000); + snapshotCreated = dfs.exists(sPath); + } + return snapshotCreated; + } + + @Override + Object getResult() { + return snapshotPath; + } + } + + /** deleteSnapshot */ + class DeleteSnapshotOp extends AtMostOnceOp { + private String dir; + private String snapshotName; + + DeleteSnapshotOp(DFSClient client, String dir, String snapshotName) { + super("deleteSnapshot", client); + this.dir = dir; + this.snapshotName = snapshotName; + } + + @Override + void prepare() throws Exception { + final Path dirPath = new Path(dir); + if (!dfs.exists(dirPath)) { + dfs.mkdirs(dirPath); + } + + Path sPath = SnapshotTestHelper.getSnapshotRoot(dirPath, snapshotName); + if (!dfs.exists(sPath)) { + dfs.allowSnapshot(dirPath); + dfs.createSnapshot(dirPath, snapshotName); + } + } + + @Override + void invoke() throws Exception { + client.deleteSnapshot(dir, snapshotName); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + final Path sPath = SnapshotTestHelper.getSnapshotRoot(new Path(dir), + snapshotName); + boolean snapshotNotDeleted = dfs.exists(sPath); + for (int i = 0; i < CHECKTIMES && snapshotNotDeleted; i++) { + Thread.sleep(1000); + snapshotNotDeleted = dfs.exists(sPath); + } + return !snapshotNotDeleted; + } + + @Override + Object getResult() { + return null; + } + } + + /** renameSnapshot */ + class RenameSnapshotOp extends AtMostOnceOp { + private String dir; + private String oldName; + private 
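/*
 * Sketch of the harness pattern being introduced here (names are illustrative,
 * not the test's real API): each @AtMostOnce operation is wrapped in an object
 * with a prepare / invoke / verify contract so one generic failover driver can
 * exercise them all, and verification polls the namenode for a bounded time in
 * the same way as the CHECKTIMES loops in this test.
 */
abstract class AtMostOnceOpSketch {
  final String name;

  AtMostOnceOpSketch(String name) {
    this.name = name;
  }

  /** Set up preconditions (parent directories, seed files, ...). */
  abstract void prepare() throws Exception;

  /** Issue the RPC once from the client's point of view. */
  abstract void invoke() throws Exception;

  /** Return true once the namenode shows the operation's effect. */
  abstract boolean tookEffect() throws Exception;

  /** Bounded polling helper mirroring the CHECKTIMES loops. */
  static boolean waitFor(java.util.concurrent.Callable<Boolean> check,
      int attempts, long sleepMs) throws Exception {
    for (int i = 0; i < attempts; i++) {
      if (check.call()) {
        return true;
      }
      Thread.sleep(sleepMs);
    }
    return check.call();
  }
}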
String newName; + + RenameSnapshotOp(DFSClient client, String dir, String oldName, + String newName) { + super("renameSnapshot", client); + this.dir = dir; + this.oldName = oldName; + this.newName = newName; + } + + @Override + void prepare() throws Exception { + final Path dirPath = new Path(dir); + if (!dfs.exists(dirPath)) { + dfs.mkdirs(dirPath); + } + + Path sPath = SnapshotTestHelper.getSnapshotRoot(dirPath, oldName); + if (!dfs.exists(sPath)) { + dfs.allowSnapshot(dirPath); + dfs.createSnapshot(dirPath, oldName); + } + } + + @Override + void invoke() throws Exception { + client.renameSnapshot(dir, oldName, newName); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + final Path sPath = SnapshotTestHelper.getSnapshotRoot(new Path(dir), + newName); + boolean snapshotRenamed = dfs.exists(sPath); + for (int i = 0; i < CHECKTIMES && !snapshotRenamed; i++) { + Thread.sleep(1000); + snapshotRenamed = dfs.exists(sPath); + } + return snapshotRenamed; + } + + @Override + Object getResult() { + return null; + } + } + + /** create file operation (without OverWrite) */ + class CreateOp extends AtMostOnceOp { + private String fileName; + private HdfsFileStatus status; + + CreateOp(DFSClient client, String fileName) { + super("create", client); + this.fileName = fileName; + } + + @Override + void prepare() throws Exception { + final Path filePath = new Path(fileName); + if (dfs.exists(filePath)) { + dfs.delete(filePath, true); + } + final Path fileParent = filePath.getParent(); + if (!dfs.exists(fileParent)) { + dfs.mkdirs(fileParent); + } + } + + @Override + void invoke() throws Exception { + EnumSet createFlag = EnumSet.of(CreateFlag.CREATE); + this.status = client.getNamenode().create(fileName, + FsPermission.getFileDefault(), client.getClientName(), + new EnumSetWritable(createFlag), false, DataNodes, + BlockSize); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + final Path filePath = new Path(fileName); + boolean fileCreated = dfs.exists(filePath); + for (int i = 0; i < CHECKTIMES && !fileCreated; i++) { + Thread.sleep(1000); + fileCreated = dfs.exists(filePath); + } + return fileCreated; + } + + @Override + Object getResult() { + return status; + } + } + + /** append operation */ + class AppendOp extends AtMostOnceOp { + private String fileName; + private LocatedBlock lbk; + + AppendOp(DFSClient client, String fileName) { + super("append", client); + this.fileName = fileName; + } + + @Override + void prepare() throws Exception { + final Path filePath = new Path(fileName); + if (!dfs.exists(filePath)) { + DFSTestUtil.createFile(dfs, filePath, BlockSize / 2, DataNodes, 0); + } + } + + @Override + void invoke() throws Exception { + lbk = client.getNamenode().append(fileName, client.getClientName()); + } + + // check if the inode of the file is under construction + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + INodeFile fileNode = cluster.getNameNode(0).getNamesystem() + .getFSDirectory().getINode4Write(fileName).asFile(); + boolean fileIsUC = fileNode.isUnderConstruction(); + for (int i = 0; i < CHECKTIMES && !fileIsUC; i++) { + Thread.sleep(1000); + fileNode = cluster.getNameNode(0).getNamesystem().getFSDirectory() + .getINode4Write(fileName).asFile(); + fileIsUC = fileNode.isUnderConstruction(); + } + return fileIsUC; + } + + @Override + Object getResult() { + return lbk; + } + } + + /** rename */ + class RenameOp extends AtMostOnceOp { + private String oldName; + private String newName; + private boolean renamed; + + 
RenameOp(DFSClient client, String oldName, String newName) { + super("rename", client); + this.oldName = oldName; + this.newName = newName; + } + + @Override + void prepare() throws Exception { + final Path filePath = new Path(oldName); + if (!dfs.exists(filePath)) { + DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0); + } + } + + @SuppressWarnings("deprecation") + @Override + void invoke() throws Exception { + this.renamed = client.rename(oldName, newName); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + Path targetPath = new Path(newName); + boolean renamed = dfs.exists(targetPath); + for (int i = 0; i < CHECKTIMES && !renamed; i++) { + Thread.sleep(1000); + renamed = dfs.exists(targetPath); + } + return renamed; + } + + @Override + Object getResult() { + return new Boolean(renamed); + } + } + + /** rename2 */ + class Rename2Op extends AtMostOnceOp { + private String oldName; + private String newName; + + Rename2Op(DFSClient client, String oldName, String newName) { + super("rename2", client); + this.oldName = oldName; + this.newName = newName; + } + + @Override + void prepare() throws Exception { + final Path filePath = new Path(oldName); + if (!dfs.exists(filePath)) { + DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0); + } + } + + @Override + void invoke() throws Exception { + client.rename(oldName, newName, Rename.OVERWRITE); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + Path targetPath = new Path(newName); + boolean renamed = dfs.exists(targetPath); + for (int i = 0; i < CHECKTIMES && !renamed; i++) { + Thread.sleep(1000); + renamed = dfs.exists(targetPath); + } + return renamed; + } + + @Override + Object getResult() { + return null; + } + } + + /** concat */ + class ConcatOp extends AtMostOnceOp { + private String target; + private String[] srcs; + private Path[] srcPaths; + + ConcatOp(DFSClient client, Path target, int numSrc) { + super("concat", client); + this.target = target.toString(); + this.srcs = new String[numSrc]; + this.srcPaths = new Path[numSrc]; + Path parent = target.getParent(); + for (int i = 0; i < numSrc; i++) { + srcPaths[i] = new Path(parent, "srcfile" + i); + srcs[i] = srcPaths[i].toString(); + } + } + + @Override + void prepare() throws Exception { + DFSTestUtil.createFile(dfs, new Path(target), BlockSize, DataNodes, 0); + for (int i = 0; i < srcPaths.length; i++) { + DFSTestUtil.createFile(dfs, srcPaths[i], BlockSize, DataNodes, 0); + } + } + + @Override + void invoke() throws Exception { + client.concat(target, srcs); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + Path targetPath = new Path(target); + boolean done = dfs.exists(targetPath); + for (int i = 0; i < CHECKTIMES && !done; i++) { + Thread.sleep(1000); + done = dfs.exists(targetPath); + } + return done; + } + + @Override + Object getResult() { + return null; + } + } + + /** delete */ + class DeleteOp extends AtMostOnceOp { + private String target; + private boolean deleted; + + DeleteOp(DFSClient client, String target) { + super("delete", client); + this.target = target; + } + + @Override + void prepare() throws Exception { + Path p = new Path(target); + if (!dfs.exists(p)) { + DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0); + } + } + + @Override + void invoke() throws Exception { + deleted = client.delete(target, true); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + Path targetPath = new Path(target); + boolean del = !dfs.exists(targetPath); + 
for (int i = 0; i < CHECKTIMES && !del; i++) { + Thread.sleep(1000); + del = !dfs.exists(targetPath); + } + return del; + } + + @Override + Object getResult() { + return new Boolean(deleted); + } + } + + /** createSymlink */ + class CreateSymlinkOp extends AtMostOnceOp { + private String target; + private String link; + + public CreateSymlinkOp(DFSClient client, String target, String link) { + super("createSymlink", client); + this.target = target; + this.link = link; + } + + @Override + void prepare() throws Exception { + Path p = new Path(target); + if (!dfs.exists(p)) { + DFSTestUtil.createFile(dfs, p, BlockSize, DataNodes, 0); + } + } + + @Override + void invoke() throws Exception { + client.createSymlink(target, link, false); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + Path linkPath = new Path(link); + FileStatus linkStatus = dfs.getFileLinkStatus(linkPath); + for (int i = 0; i < CHECKTIMES && linkStatus == null; i++) { + Thread.sleep(1000); + linkStatus = dfs.getFileLinkStatus(linkPath); + } + return linkStatus != null; + } + + @Override + Object getResult() { + return null; + } + } + + /** updatePipeline */ + class UpdatePipelineOp extends AtMostOnceOp { + private String file; + private ExtendedBlock oldBlock; + private ExtendedBlock newBlock; + private DatanodeInfo[] nodes; + private FSDataOutputStream out; + + public UpdatePipelineOp(DFSClient client, String file) { + super("updatePipeline", client); + this.file = file; + } + + @Override + void prepare() throws Exception { + final Path filePath = new Path(file); + DFSTestUtil.createFile(dfs, filePath, BlockSize, DataNodes, 0); + // append to the file and leave the last block under construction + out = this.client.append(file, BlockSize, null, null); + byte[] appendContent = new byte[100]; + new Random().nextBytes(appendContent); + out.write(appendContent); + ((HdfsDataOutputStream) out).hsync(EnumSet.of(SyncFlag.UPDATE_LENGTH)); + + LocatedBlocks blks = dfs.getClient() + .getLocatedBlocks(file, BlockSize + 1); + assertEquals(1, blks.getLocatedBlocks().size()); + nodes = blks.get(0).getLocations(); + oldBlock = blks.get(0).getBlock(); + + LocatedBlock newLbk = client.getNamenode().updateBlockForPipeline( + oldBlock, client.getClientName()); + newBlock = new ExtendedBlock(oldBlock.getBlockPoolId(), + oldBlock.getBlockId(), oldBlock.getNumBytes(), + newLbk.getBlock().getGenerationStamp()); + } + + @Override + void invoke() throws Exception { + DatanodeInfo[] newNodes = new DatanodeInfo[2]; + newNodes[0] = nodes[0]; + newNodes[1] = nodes[1]; + + client.getNamenode().updatePipeline(client.getClientName(), oldBlock, + newBlock, newNodes); + out.close(); + } + + @Override + boolean checkNamenodeBeforeReturn() throws Exception { + INodeFileUnderConstruction fileNode = (INodeFileUnderConstruction) cluster + .getNamesystem(0).getFSDirectory().getINode4Write(file).asFile(); + BlockInfoUnderConstruction blkUC = + (BlockInfoUnderConstruction) (fileNode.getBlocks())[1]; + int datanodeNum = blkUC.getExpectedLocations().length; + for (int i = 0; i < CHECKTIMES && datanodeNum != 2; i++) { + Thread.sleep(1000); + datanodeNum = blkUC.getExpectedLocations().length; + } + return datanodeNum == 2; + } + + @Override + Object getResult() { + return null; + } + } + + @Test (timeout=60000) + public void testCreateSnapshot() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new CreateSnapshotOp(client, "/test", "s1"); + testClientRetryWithFailover(op); + } + + @Test 
(timeout=60000) + public void testDeleteSnapshot() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new DeleteSnapshotOp(client, "/test", "s1"); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testRenameSnapshot() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new RenameSnapshotOp(client, "/test", "s1", "s2"); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testCreate() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new CreateOp(client, "/testfile"); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testAppend() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new AppendOp(client, "/testfile"); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testRename() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new RenameOp(client, "/file1", "/file2"); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testRename2() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new Rename2Op(client, "/file1", "/file2"); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testConcat() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new ConcatOp(client, new Path("/test/file"), 5); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testDelete() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new DeleteOp(client, "/testfile"); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testCreateSymlink() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new CreateSymlinkOp(client, "/testfile", "/testlink"); + testClientRetryWithFailover(op); + } + + @Test (timeout=60000) + public void testUpdatePipeline() throws Exception { + final DFSClient client = genClientWithDummyHandler(); + AtMostOnceOp op = new UpdatePipelineOp(client, "/testfile"); + testClientRetryWithFailover(op); + } + /** * When NN failover happens, if the client did not receive the response and * send a retry request to the other NN, the same response should be recieved * based on the retry cache. - * - * TODO: currently we only test the createSnapshot from the client side. We - * may need to cover all the calls with "@AtMostOnce" annotation. 
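/*
 * Toy, self-contained illustration (not HDFS code) of the at-most-once
 * behaviour these tests exercise: the namenode's retry cache, keyed by the
 * client's call id, replays the recorded response to a retried request instead
 * of re-executing it, so a client that retries against the newly active NN
 * after failover observes the same result as the original call.
 */
import java.util.HashMap;
import java.util.Map;

class ToyRetryCacheDemo {
  private final Map<Long, String> retryCache = new HashMap<Long, String>();
  private int executions = 0;

  /** Handles a request; a retry with a known call id gets the cached answer. */
  synchronized String handle(long callId, String request) {
    String cached = retryCache.get(callId);
    if (cached != null) {
      return cached;              // retried call: do not run the operation again
    }
    executions++;                 // first time this call id is seen
    String response = "done:" + request + ":" + executions;
    retryCache.put(callId, response);
    return response;
  }

  public static void main(String[] args) {
    ToyRetryCacheDemo server = new ToyRetryCacheDemo();
    String first = server.handle(42L, "createSnapshot /test s1");
    // The client times out, fails over, and retries with the same call id.
    String retried = server.handle(42L, "createSnapshot /test s1");
    System.out.println(first.equals(retried));   // true: executed exactly once
  }
}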
*/ - @Test - public void testClientRetryWithFailover() throws Exception { - final String dir = "/test"; - final Path dirPath = new Path(dir); - final String sName = "s1"; - final String dirSnapshot = dir + HdfsConstants.SEPARATOR_DOT_SNAPSHOT_DIR - + Path.SEPARATOR + sName; + public void testClientRetryWithFailover(final AtMostOnceOp op) + throws Exception { + final Map results = new HashMap(); - dfs.mkdirs(dirPath); - dfs.allowSnapshot(dirPath); - - final DFSClient client = genClientWithDummyHandler(); + op.prepare(); // set DummyRetryInvocationHandler#block to true DummyRetryInvocationHandler.block.set(true); @@ -207,28 +822,25 @@ public class TestRetryCacheWithHA { @Override public void run() { try { - final String snapshotPath = client.createSnapshot(dir, "s1"); - assertEquals(dirSnapshot, snapshotPath); - LOG.info("Created snapshot " + snapshotPath); + op.invoke(); + Object result = op.getResult(); + LOG.info("Operation " + op.name + " finished"); synchronized (TestRetryCacheWithHA.this) { - results.put("createSnapshot", snapshotPath); + results.put(op.name, result == null ? "SUCCESS" : result); TestRetryCacheWithHA.this.notifyAll(); } - } catch (IOException e) { - LOG.info("Got IOException " + e + " while creating snapshot"); + } catch (Exception e) { + LOG.info("Got Exception while calling " + op.name, e); } finally { - IOUtils.cleanup(null, client); + IOUtils.cleanup(null, op.client); } } }.start(); - // make sure the client's createSnapshot call has actually been handled by - // the active NN - boolean snapshotCreated = dfs.exists(new Path(dirSnapshot)); - while (!snapshotCreated) { - Thread.sleep(1000); - snapshotCreated = dfs.exists(new Path(dirSnapshot)); - } + // make sure the client's call has actually been handled by the active NN + assertTrue("After waiting the operation " + op.name + + " still has not taken effect on NN yet", + op.checkNamenodeBeforeReturn()); // force the failover cluster.transitionToStandby(0); @@ -238,11 +850,11 @@ public class TestRetryCacheWithHA { DummyRetryInvocationHandler.block.set(false); synchronized (this) { - while (!results.containsKey("createSnapshot")) { + while (!results.containsKey(op.name)) { this.wait(); } - LOG.info("Got the result of createSnapshot: " - + results.get("createSnapshot")); + LOG.info("Got the result of " + op.name + ": " + + results.get(op.name)); } } } \ No newline at end of file From 487ce6c7bc87819659602a4d930bc50d31f5d022 Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Thu, 22 Aug 2013 01:19:26 +0000 Subject: [PATCH 053/153] YARN-1082. Addendum patch. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516352 13f79535-47bb-0310-9956-ffa450edef68 --- .../server/resourcemanager/recovery/FileSystemRMStateStore.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index de1f65a8016..30d5d4108d4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -91,7 +91,7 @@ public class FileSystemRMStateStore extends RMStateStore { } @Override - protected void startInternal() throws Exception { + protected synchronized void startInternal() throws Exception { // create filesystem only now, as part of service-start. By this time, RM is // authenticated with kerberos so we are good to create a file-system // handle. From ded91b4cfa22c8d7c498ea21c8c1ac52fe9a9e29 Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Thu, 22 Aug 2013 01:52:24 +0000 Subject: [PATCH 054/153] MAPREDUCE-5468. Fix MR AM recovery for map-only jobs. Contributed by Vinod K. V. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516358 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../hadoop/mapreduce/v2/app/MRAppMaster.java | 9 +- .../hadoop/mapreduce/v2/app/TestRecovery.java | 111 +++++++++++++++++- 3 files changed, 118 insertions(+), 5 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index c73ab8b8db8..d0345594acc 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -231,6 +231,9 @@ Release 2.1.1-beta - UNRELEASED pick up the right history file for the last successful AM. (Jian He via vinodkv) + MAPREDUCE-5468. Fix MR AM recovery for map-only jobs. (vinodkv via + acmurthy) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index d8ddb2c29f1..ab1c4feadf0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -1042,11 +1042,11 @@ public class MRAppMaster extends CompositeService { // attempt will generate one. However that disables recovery if there // are reducers as the shuffle secret would be app attempt specific. 
int numReduceTasks = getConfig().getInt(MRJobConfig.NUM_REDUCES, 0); - boolean shuffleKeyValidForRecovery = (numReduceTasks > 0 && - TokenCache.getShuffleSecretKey(jobCredentials) != null); + boolean shuffleKeyValidForRecovery = + TokenCache.getShuffleSecretKey(jobCredentials) != null; if (recoveryEnabled && recoverySupportedByCommitter - && shuffleKeyValidForRecovery) { + && (numReduceTasks <= 0 || shuffleKeyValidForRecovery)) { LOG.info("Recovery is enabled. " + "Will try to recover from previous life on best effort basis."); try { @@ -1059,7 +1059,8 @@ public class MRAppMaster extends CompositeService { } else { LOG.info("Will not try to recover. recoveryEnabled: " + recoveryEnabled + " recoverySupportedByCommitter: " - + recoverySupportedByCommitter + " shuffleKeyValidForRecovery: " + + recoverySupportedByCommitter + " numReduceTasks: " + + numReduceTasks + " shuffleKeyValidForRecovery: " + shuffleKeyValidForRecovery + " ApplicationAttemptID: " + appAttemptID.getAttemptId()); // Get the amInfos anyways whether recovery is enabled or not diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java index 911fd0d022d..1c17b224da1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java @@ -114,7 +114,6 @@ public class TestRecovery { private Text val1 = new Text("val1"); private Text val2 = new Text("val2"); - /** * AM with 2 maps and 1 reduce. For 1st map, one attempt fails, one attempt * completely disappears because of failed launch, one attempt gets killed and @@ -316,6 +315,116 @@ public class TestRecovery { // available in the failed attempt should be available here } + /** + * AM with 3 maps and 0 reduce. AM crashes after the first two tasks finishes + * and recovers completely and succeeds in the second generation. 
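/*
 * Sketch of the recovery predicate after this change, pulled out as a pure
 * helper purely for illustration (in MRAppMaster the logic stays inline):
 * a map-only job may now recover without a shuffle secret, while a job with
 * reducers still requires one.
 */
class RecoveryDecisionSketch {
  static boolean shouldRecover(boolean recoveryEnabled,
      boolean recoverySupportedByCommitter,
      int numReduceTasks,
      boolean shuffleKeyValidForRecovery) {
    return recoveryEnabled
        && recoverySupportedByCommitter
        && (numReduceTasks <= 0 || shuffleKeyValidForRecovery);
  }

  public static void main(String[] args) {
    // Map-only job without a shuffle key: recoverable after this patch.
    System.out.println(shouldRecover(true, true, 0, false));   // true
    // Job with reducers and no shuffle key: still not recoverable.
    System.out.println(shouldRecover(true, true, 5, false));   // false
  }
}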
+ * + * @throws Exception + */ + @Test + public void testCrashOfMapsOnlyJob() throws Exception { + int runCount = 0; + MRApp app = + new MRAppWithHistory(3, 0, false, this.getClass().getName(), true, + ++runCount); + Configuration conf = new Configuration(); + conf.setBoolean("mapred.mapper.new-api", true); + conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); + conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); + Job job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + + // all maps would be running + Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size()); + Iterator it = job.getTasks().values().iterator(); + Task mapTask1 = it.next(); + Task mapTask2 = it.next(); + Task mapTask3 = it.next(); + + // all maps must be running + app.waitForState(mapTask1, TaskState.RUNNING); + app.waitForState(mapTask2, TaskState.RUNNING); + app.waitForState(mapTask3, TaskState.RUNNING); + + TaskAttempt task1Attempt = + mapTask1.getAttempts().values().iterator().next(); + TaskAttempt task2Attempt = + mapTask2.getAttempts().values().iterator().next(); + TaskAttempt task3Attempt = + mapTask3.getAttempts().values().iterator().next(); + + // before sending the TA_DONE, event make sure attempt has come to + // RUNNING state + app.waitForState(task1Attempt, TaskAttemptState.RUNNING); + app.waitForState(task2Attempt, TaskAttemptState.RUNNING); + app.waitForState(task3Attempt, TaskAttemptState.RUNNING); + + // send the done signal to the 1st two maps + app + .getContext() + .getEventHandler() + .handle( + new TaskAttemptEvent(task1Attempt.getID(), TaskAttemptEventType.TA_DONE)); + app + .getContext() + .getEventHandler() + .handle( + new TaskAttemptEvent(task2Attempt.getID(), TaskAttemptEventType.TA_DONE)); + + // wait for first two map task to complete + app.waitForState(mapTask1, TaskState.SUCCEEDED); + app.waitForState(mapTask2, TaskState.SUCCEEDED); + + // stop the app + app.stop(); + + // rerun + // in rerun the 1st two map will be recovered from previous run + app = + new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, + ++runCount); + conf = new Configuration(); + conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true); + conf.setBoolean("mapred.mapper.new-api", true); + conf.set(FileOutputFormat.OUTDIR, outputDir.toString()); + // Set num-reduces explicitly in conf as recovery logic depends on it. 
+ conf.setInt(MRJobConfig.NUM_REDUCES, 0); + conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false); + job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + + Assert.assertEquals("No of tasks not correct", 3, job.getTasks().size()); + it = job.getTasks().values().iterator(); + mapTask1 = it.next(); + mapTask2 = it.next(); + mapTask3 = it.next(); + + // first two maps will be recovered, no need to send done + app.waitForState(mapTask1, TaskState.SUCCEEDED); + app.waitForState(mapTask2, TaskState.SUCCEEDED); + + app.waitForState(mapTask3, TaskState.RUNNING); + + task3Attempt = mapTask3.getAttempts().values().iterator().next(); + // before sending the TA_DONE, event make sure attempt has come to + // RUNNING state + app.waitForState(task3Attempt, TaskAttemptState.RUNNING); + + // send the done signal to the 3rd map task + app + .getContext() + .getEventHandler() + .handle( + new TaskAttemptEvent(mapTask3.getAttempts().values().iterator().next() + .getID(), TaskAttemptEventType.TA_DONE)); + + // wait to get it completed + app.waitForState(mapTask3, TaskState.SUCCEEDED); + + app.waitForState(job, JobState.SUCCEEDED); + app.verifyCompleted(); + } + @Test public void testMultipleCrashes() throws Exception { From 148bf3ea4e6fddef2c3ab9bae46120ab0fa9e1e2 Mon Sep 17 00:00:00 2001 From: Arun Murthy Date: Thu, 22 Aug 2013 01:59:25 +0000 Subject: [PATCH 055/153] MAPREDUCE-5475. Ensure MRClientService verifies ACLs for users. Contributed by Jason Lowe. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516361 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 + .../v2/app/client/MRClientService.java | 64 +++++++++----- .../mapreduce/v2/app/TestMRClientService.java | 85 +++++++++++++++++++ 3 files changed, 130 insertions(+), 22 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index d0345594acc..6d3e6d0303f 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -234,6 +234,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5468. Fix MR AM recovery for map-only jobs. (vinodkv via acmurthy) + MAPREDUCE-5475. Ensure MRClientService verifies ACLs for users. 
(jlowe via + acmurthy) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 4bb39696e1e..d36bf62fdf0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; @@ -78,6 +79,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider; import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -175,16 +178,22 @@ public class MRClientService extends AbstractService return getBindAddress(); } - private Job verifyAndGetJob(JobId jobID, - boolean modifyAccess) throws IOException { + private Job verifyAndGetJob(JobId jobID, + JobACL accessType) throws IOException { Job job = appContext.getJob(jobID); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + if (!job.checkAccess(ugi, accessType)) { + throw new AccessControlException("User " + ugi.getShortUserName() + + " cannot perform operation " + accessType.name() + " on " + + jobID); + } return job; } private Task verifyAndGetTask(TaskId taskID, - boolean modifyAccess) throws IOException { + JobACL accessType) throws IOException { Task task = verifyAndGetJob(taskID.getJobId(), - modifyAccess).getTask(taskID); + accessType).getTask(taskID); if (task == null) { throw new IOException("Unknown Task " + taskID); } @@ -192,9 +201,9 @@ public class MRClientService extends AbstractService } private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID, - boolean modifyAccess) throws IOException { + JobACL accessType) throws IOException { TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(), - modifyAccess).getAttempt(attemptID); + accessType).getAttempt(attemptID); if (attempt == null) { throw new IOException("Unknown TaskAttempt " + attemptID); } @@ -205,7 +214,7 @@ public class MRClientService extends AbstractService public GetCountersResponse getCounters(GetCountersRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class); response.setCounters(TypeConverter.toYarn(job.getAllCounters())); @@ -216,7 +225,7 @@ public class MRClientService 
extends AbstractService public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class); if (job != null) { @@ -235,7 +244,7 @@ public class MRClientService extends AbstractService GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class); response.setTaskAttemptReport( - verifyAndGetAttempt(taskAttemptId, false).getReport()); + verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getReport()); return response; } @@ -245,7 +254,8 @@ public class MRClientService extends AbstractService TaskId taskId = request.getTaskId(); GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class); - response.setTaskReport(verifyAndGetTask(taskId, false).getReport()); + response.setTaskReport( + verifyAndGetTask(taskId, JobACL.VIEW_JOB).getReport()); return response; } @@ -256,7 +266,7 @@ public class MRClientService extends AbstractService JobId jobId = request.getJobId(); int fromEventId = request.getFromEventId(); int maxEvents = request.getMaxEvents(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class); @@ -270,9 +280,11 @@ public class MRClientService extends AbstractService public KillJobResponse killJob(KillJobRequest request) throws IOException { JobId jobId = request.getJobId(); - String message = "Kill Job received from client " + jobId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill job " + jobId + " received from " + callerUGI + + " at " + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetJob(jobId, true); + verifyAndGetJob(jobId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new JobDiagnosticsUpdateEvent(jobId, message)); appContext.getEventHandler().handle( @@ -287,9 +299,11 @@ public class MRClientService extends AbstractService public KillTaskResponse killTask(KillTaskRequest request) throws IOException { TaskId taskId = request.getTaskId(); - String message = "Kill task received from client " + taskId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill task " + taskId + " received from " + callerUGI + + " at " + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetTask(taskId, true); + verifyAndGetTask(taskId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskEvent(taskId, TaskEventType.T_KILL)); KillTaskResponse response = @@ -302,9 +316,12 @@ public class MRClientService extends AbstractService public KillTaskAttemptResponse killTaskAttempt( KillTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - String message = "Kill task attempt received from client " + taskAttemptId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill task attempt " + taskAttemptId + + " received from " + callerUGI + " at " + + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetAttempt(taskAttemptId, true); + verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); 
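/*
 * Illustrative sketch of the access-check pattern this patch applies to every
 * MRClientService RPC: map the RPC to a view or modify access type, check the
 * caller against the job's ACLs, and reject before touching any state. The
 * names below are hypothetical; the real code uses Job.checkAccess, JobACL and
 * Hadoop's AccessControlException.
 */
class AclGuardSketch {
  enum Access { VIEW, MODIFY }

  interface Acls {
    boolean allows(String user, Access access);
  }

  /** Rejects the caller before any job state is read or mutated. */
  static void checkAccess(Acls acls, String user, Access access, String target) {
    if (!acls.allows(user, access)) {
      throw new SecurityException("User " + user
          + " cannot perform operation " + access + " on " + target);
    }
  }

  public static void main(String[] args) {
    Acls viewOnly = new Acls() {
      @Override
      public boolean allows(String user, Access access) {
        return access == Access.VIEW;
      }
    };
    checkAccess(viewOnly, "viewonlyuser", Access.VIEW, "job_1");       // allowed
    try {
      checkAccess(viewOnly, "viewonlyuser", Access.MODIFY, "job_1");   // rejected
    } catch (SecurityException expected) {
      System.out.println("modify rejected as expected");
    }
  }
}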
appContext.getEventHandler().handle( @@ -322,8 +339,8 @@ public class MRClientService extends AbstractService GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class); - response.addAllDiagnostics( - verifyAndGetAttempt(taskAttemptId, false).getDiagnostics()); + response.addAllDiagnostics(verifyAndGetAttempt(taskAttemptId, + JobACL.VIEW_JOB).getDiagnostics()); return response; } @@ -332,9 +349,12 @@ public class MRClientService extends AbstractService public FailTaskAttemptResponse failTaskAttempt( FailTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - String message = "Fail task attempt received from client " + taskAttemptId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Fail task attempt " + taskAttemptId + + " received from " + callerUGI + " at " + + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetAttempt(taskAttemptId, true); + verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -356,7 +376,7 @@ public class MRClientService extends AbstractService GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); Collection tasks = job.getTasks(taskType).values(); LOG.info("Getting task report for " + taskType + " " + jobId + ". Report-size will be " + tasks.size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java index 34b8dc76354..b17b8ce7adc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java @@ -18,13 +18,20 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.junit.Assert.fail; + +import java.security.PrivilegedExceptionAction; import java.util.Iterator; import java.util.List; import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.JobACL; +import org.apache.hadoop.mapreduce.MRConfig; +import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; @@ -32,6 +39,9 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompleti import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest; +import 
org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; @@ -51,6 +61,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -169,6 +181,79 @@ public class TestMRClientService { app.waitForState(job, JobState.SUCCEEDED); } + @Test + public void testViewAclOnlyCannotModify() throws Exception { + final MRAppWithClientService app = new MRAppWithClientService(1, 0, false); + final Configuration conf = new Configuration(); + conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); + conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser"); + Job job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size()); + Iterator it = job.getTasks().values().iterator(); + Task task = it.next(); + app.waitForState(task, TaskState.RUNNING); + TaskAttempt attempt = task.getAttempts().values().iterator().next(); + app.waitForState(attempt, TaskAttemptState.RUNNING); + + UserGroupInformation viewOnlyUser = + UserGroupInformation.createUserForTesting( + "viewonlyuser", new String[] {}); + Assert.assertTrue("viewonlyuser cannot view job", + job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB)); + Assert.assertFalse("viewonlyuser can modify job", + job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB)); + MRClientProtocol client = viewOnlyUser.doAs( + new PrivilegedExceptionAction() { + @Override + public MRClientProtocol run() throws Exception { + YarnRPC rpc = YarnRPC.create(conf); + return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, + app.clientService.getBindAddress(), conf); + } + }); + + KillJobRequest killJobRequest = recordFactory.newRecordInstance( + KillJobRequest.class); + killJobRequest.setJobId(app.getJobId()); + try { + client.killJob(killJobRequest); + fail("viewonlyuser killed job"); + } catch (AccessControlException e) { + // pass + } + + KillTaskRequest killTaskRequest = recordFactory.newRecordInstance( + KillTaskRequest.class); + killTaskRequest.setTaskId(task.getID()); + try { + client.killTask(killTaskRequest); + fail("viewonlyuser killed task"); + } catch (AccessControlException e) { + // pass + } + + KillTaskAttemptRequest killTaskAttemptRequest = + recordFactory.newRecordInstance(KillTaskAttemptRequest.class); + killTaskAttemptRequest.setTaskAttemptId(attempt.getID()); + try { + client.killTaskAttempt(killTaskAttemptRequest); + fail("viewonlyuser killed task attempt"); + } catch (AccessControlException e) { + // pass + } + + FailTaskAttemptRequest failTaskAttemptRequest = + recordFactory.newRecordInstance(FailTaskAttemptRequest.class); + failTaskAttemptRequest.setTaskAttemptId(attempt.getID()); + try { + client.failTaskAttempt(failTaskAttemptRequest); + fail("viewonlyuser killed task attempt"); + } 
catch (AccessControlException e) { + // pass + } + } + private void verifyJobReport(JobReport jr) { Assert.assertNotNull("JobReport is null", jr); List amInfos = jr.getAMInfos(); From d2c70e3e94c123970ad5883814d0b40690bbe9b9 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Thu, 22 Aug 2013 20:34:46 +0000 Subject: [PATCH 056/153] Revert MAPREDUCE-5475 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516594 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 - .../v2/app/client/MRClientService.java | 64 +++++--------- .../mapreduce/v2/app/TestMRClientService.java | 85 ------------------- 3 files changed, 22 insertions(+), 130 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 6d3e6d0303f..d0345594acc 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -234,9 +234,6 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5468. Fix MR AM recovery for map-only jobs. (vinodkv via acmurthy) - MAPREDUCE-5475. Ensure MRClientService verifies ACLs for users. (jlowe via - acmurthy) - Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index d36bf62fdf0..4bb39696e1e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -28,7 +28,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; @@ -79,8 +78,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider; import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -178,22 +175,16 @@ public class MRClientService extends AbstractService return getBindAddress(); } - private Job verifyAndGetJob(JobId jobID, - JobACL accessType) throws IOException { + private Job verifyAndGetJob(JobId jobID, + boolean modifyAccess) throws IOException { Job job = appContext.getJob(jobID); - UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); - if (!job.checkAccess(ugi, accessType)) { - throw new AccessControlException("User " + ugi.getShortUserName() - + " cannot perform operation " + accessType.name() + " on " - + jobID); - } return job; } private Task verifyAndGetTask(TaskId taskID, - JobACL accessType) throws IOException { + boolean modifyAccess) throws IOException { Task task = 
verifyAndGetJob(taskID.getJobId(), - accessType).getTask(taskID); + modifyAccess).getTask(taskID); if (task == null) { throw new IOException("Unknown Task " + taskID); } @@ -201,9 +192,9 @@ public class MRClientService extends AbstractService } private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID, - JobACL accessType) throws IOException { + boolean modifyAccess) throws IOException { TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(), - accessType).getAttempt(attemptID); + modifyAccess).getAttempt(attemptID); if (attempt == null) { throw new IOException("Unknown TaskAttempt " + attemptID); } @@ -214,7 +205,7 @@ public class MRClientService extends AbstractService public GetCountersResponse getCounters(GetCountersRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); + Job job = verifyAndGetJob(jobId, false); GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class); response.setCounters(TypeConverter.toYarn(job.getAllCounters())); @@ -225,7 +216,7 @@ public class MRClientService extends AbstractService public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); + Job job = verifyAndGetJob(jobId, false); GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class); if (job != null) { @@ -244,7 +235,7 @@ public class MRClientService extends AbstractService GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class); response.setTaskAttemptReport( - verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getReport()); + verifyAndGetAttempt(taskAttemptId, false).getReport()); return response; } @@ -254,8 +245,7 @@ public class MRClientService extends AbstractService TaskId taskId = request.getTaskId(); GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class); - response.setTaskReport( - verifyAndGetTask(taskId, JobACL.VIEW_JOB).getReport()); + response.setTaskReport(verifyAndGetTask(taskId, false).getReport()); return response; } @@ -266,7 +256,7 @@ public class MRClientService extends AbstractService JobId jobId = request.getJobId(); int fromEventId = request.getFromEventId(); int maxEvents = request.getMaxEvents(); - Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); + Job job = verifyAndGetJob(jobId, false); GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class); @@ -280,11 +270,9 @@ public class MRClientService extends AbstractService public KillJobResponse killJob(KillJobRequest request) throws IOException { JobId jobId = request.getJobId(); - UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); - String message = "Kill job " + jobId + " received from " + callerUGI - + " at " + Server.getRemoteAddress(); + String message = "Kill Job received from client " + jobId; LOG.info(message); - verifyAndGetJob(jobId, JobACL.MODIFY_JOB); + verifyAndGetJob(jobId, true); appContext.getEventHandler().handle( new JobDiagnosticsUpdateEvent(jobId, message)); appContext.getEventHandler().handle( @@ -299,11 +287,9 @@ public class MRClientService extends AbstractService public KillTaskResponse killTask(KillTaskRequest request) throws IOException { TaskId taskId = request.getTaskId(); - UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); - String message 
= "Kill task " + taskId + " received from " + callerUGI - + " at " + Server.getRemoteAddress(); + String message = "Kill task received from client " + taskId; LOG.info(message); - verifyAndGetTask(taskId, JobACL.MODIFY_JOB); + verifyAndGetTask(taskId, true); appContext.getEventHandler().handle( new TaskEvent(taskId, TaskEventType.T_KILL)); KillTaskResponse response = @@ -316,12 +302,9 @@ public class MRClientService extends AbstractService public KillTaskAttemptResponse killTaskAttempt( KillTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); - String message = "Kill task attempt " + taskAttemptId - + " received from " + callerUGI + " at " - + Server.getRemoteAddress(); + String message = "Kill task attempt received from client " + taskAttemptId; LOG.info(message); - verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); + verifyAndGetAttempt(taskAttemptId, true); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -339,8 +322,8 @@ public class MRClientService extends AbstractService GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class); - response.addAllDiagnostics(verifyAndGetAttempt(taskAttemptId, - JobACL.VIEW_JOB).getDiagnostics()); + response.addAllDiagnostics( + verifyAndGetAttempt(taskAttemptId, false).getDiagnostics()); return response; } @@ -349,12 +332,9 @@ public class MRClientService extends AbstractService public FailTaskAttemptResponse failTaskAttempt( FailTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); - String message = "Fail task attempt " + taskAttemptId - + " received from " + callerUGI + " at " - + Server.getRemoteAddress(); + String message = "Fail task attempt received from client " + taskAttemptId; LOG.info(message); - verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); + verifyAndGetAttempt(taskAttemptId, true); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -376,7 +356,7 @@ public class MRClientService extends AbstractService GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class); - Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); + Job job = verifyAndGetJob(jobId, false); Collection tasks = job.getTasks(taskType).values(); LOG.info("Getting task report for " + taskType + " " + jobId + ". 
Report-size will be " + tasks.size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java index b17b8ce7adc..34b8dc76354 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java @@ -18,20 +18,13 @@ package org.apache.hadoop.mapreduce.v2.app; -import static org.junit.Assert.fail; - -import java.security.PrivilegedExceptionAction; import java.util.Iterator; import java.util.List; import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.JobACL; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; @@ -39,9 +32,6 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompleti import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; @@ -61,8 +51,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -181,79 +169,6 @@ public class TestMRClientService { app.waitForState(job, JobState.SUCCEEDED); } - @Test - public void testViewAclOnlyCannotModify() throws Exception { - final MRAppWithClientService app = new MRAppWithClientService(1, 0, false); - final Configuration conf = new Configuration(); - conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); - conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser"); - Job job = app.submit(conf); - app.waitForState(job, JobState.RUNNING); - Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size()); - Iterator it = job.getTasks().values().iterator(); - Task task = it.next(); - app.waitForState(task, 
TaskState.RUNNING); - TaskAttempt attempt = task.getAttempts().values().iterator().next(); - app.waitForState(attempt, TaskAttemptState.RUNNING); - - UserGroupInformation viewOnlyUser = - UserGroupInformation.createUserForTesting( - "viewonlyuser", new String[] {}); - Assert.assertTrue("viewonlyuser cannot view job", - job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB)); - Assert.assertFalse("viewonlyuser can modify job", - job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB)); - MRClientProtocol client = viewOnlyUser.doAs( - new PrivilegedExceptionAction() { - @Override - public MRClientProtocol run() throws Exception { - YarnRPC rpc = YarnRPC.create(conf); - return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, - app.clientService.getBindAddress(), conf); - } - }); - - KillJobRequest killJobRequest = recordFactory.newRecordInstance( - KillJobRequest.class); - killJobRequest.setJobId(app.getJobId()); - try { - client.killJob(killJobRequest); - fail("viewonlyuser killed job"); - } catch (AccessControlException e) { - // pass - } - - KillTaskRequest killTaskRequest = recordFactory.newRecordInstance( - KillTaskRequest.class); - killTaskRequest.setTaskId(task.getID()); - try { - client.killTask(killTaskRequest); - fail("viewonlyuser killed task"); - } catch (AccessControlException e) { - // pass - } - - KillTaskAttemptRequest killTaskAttemptRequest = - recordFactory.newRecordInstance(KillTaskAttemptRequest.class); - killTaskAttemptRequest.setTaskAttemptId(attempt.getID()); - try { - client.killTaskAttempt(killTaskAttemptRequest); - fail("viewonlyuser killed task attempt"); - } catch (AccessControlException e) { - // pass - } - - FailTaskAttemptRequest failTaskAttemptRequest = - recordFactory.newRecordInstance(FailTaskAttemptRequest.class); - failTaskAttemptRequest.setTaskAttemptId(attempt.getID()); - try { - client.failTaskAttempt(failTaskAttemptRequest); - fail("viewonlyuser killed task attempt"); - } catch (AccessControlException e) { - // pass - } - } - private void verifyJobReport(JobReport jr) { Assert.assertNotNull("JobReport is null", jr); List amInfos = jr.getAMInfos(); From d1dfa5f923408fea94fe18b7886ead4573560e6a Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 22 Aug 2013 21:13:51 +0000 Subject: [PATCH 057/153] HADOOP-9887. globStatus does not correctly handle paths starting with a drive spec on Windows. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516608 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/fs/Globber.java | 22 ++++++++++++++----- 2 files changed, 19 insertions(+), 6 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index a2d1304ead7..7b749b7a1bd 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -420,6 +420,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9880. SASL changes from HADOOP-9421 breaks Secure HA NN. (daryn via jing9) + HADOOP-9887. globStatus does not correctly handle paths starting with a drive + spec on Windows. 
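The Globber.java hunk later in this patch starts glob expansion from the drive root when an absolute pattern on Windows begins with a drive spec such as /E:/foo, instead of always starting from "/". The following is only a minimal, self-contained sketch of that idea; the class and method names (GlobRootSketch, globRoot) are invented for illustration and are not part of Hadoop.

import java.util.regex.Pattern;

public class GlobRootSketch {
  // Matches a leading forward-slash drive spec such as "/E:" in "/E:/tmp/*.log".
  private static final Pattern DRIVE_SPEC = Pattern.compile("^/[a-zA-Z]:(/|$)");

  static String globRoot(String absolutePattern, boolean isWindows) {
    if (isWindows && DRIVE_SPEC.matcher(absolutePattern).find()) {
      // Start expansion at the drive root, e.g. "/E:/", treating the drive
      // letter as already consumed -- the same idea as removing the first
      // path component in the patched Globber.
      return absolutePattern.substring(0, 3) + "/";
    }
    // Everywhere else expansion starts at the filesystem root.
    return "/";
  }

  public static void main(String[] args) {
    System.out.println(globRoot("/E:/tmp/*.log", true));   // /E:/
    System.out.println(globRoot("/tmp/*.log", true));      // /
    System.out.println(globRoot("/E:/tmp/*.log", false));  // /
  }
}

In the actual hunk, the drive-letter component is removed from the component list and the starting FileStatus points at the drive root rather than the plain "/" root.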
(Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java index 57ad45e81d4..b0bd8490715 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java @@ -97,7 +97,7 @@ class Globber { /** * Translate an absolute path into a list of path components. * We merge double slashes into a single slash here. - * The first path component (i.e. root) does not get an entry in the list. + * POSIX root path, i.e. '/', does not get an entry in the list. */ private static List getPathComponents(String path) throws IOException { @@ -167,8 +167,8 @@ class Globber { // Get the absolute path for this flattened pattern. We couldn't do // this prior to flattening because of patterns like {/,a}, where which // path you go down influences how the path must be made absolute. - Path absPattern = - fixRelativePart(new Path(flatPattern .isEmpty() ? "." : flatPattern )); + Path absPattern = fixRelativePart(new Path( + flatPattern.isEmpty() ? Path.CUR_DIR : flatPattern)); // Now we break the flattened, absolute pattern into path components. // For example, /a/*/c would be broken into the list [a, *, c] List components = @@ -176,9 +176,19 @@ class Globber { // Starting out at the root of the filesystem, we try to match // filesystem entries against pattern components. ArrayList candidates = new ArrayList(1); - candidates.add(new FileStatus(0, true, 0, 0, 0, - new Path(scheme, authority, "/"))); - + if (Path.WINDOWS && !components.isEmpty() + && Path.isWindowsAbsolutePath(absPattern.toUri().getPath(), true)) { + // On Windows the path could begin with a drive letter, e.g. /E:/foo. + // We will skip matching the drive letter and start from listing the + // root of the filesystem on that drive. + String driveLetter = components.remove(0); + candidates.add(new FileStatus(0, true, 0, 0, 0, new Path(scheme, + authority, Path.SEPARATOR + driveLetter + Path.SEPARATOR))); + } else { + candidates.add(new FileStatus(0, true, 0, 0, 0, + new Path(scheme, authority, Path.SEPARATOR))); + } + for (String component : components) { ArrayList newCandidates = new ArrayList(candidates.size()); From 200220e8f3684258c281736fd31f0ebdcbac91f0 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 22 Aug 2013 21:39:30 +0000 Subject: [PATCH 058/153] MAPREDUCE-5470. LocalJobRunner does not work on Windows. Contributed by Sandy Ryza. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516623 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../apache/hadoop/mapred/LocalJobRunner.java | 17 ++++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index d0345594acc..85650bd504a 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -234,6 +234,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5468. Fix MR AM recovery for map-only jobs. (vinodkv via acmurthy) + MAPREDUCE-5470. LocalJobRunner does not work on Windows. 
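The LocalJobRunner.java hunk below rebuilds each task's mapred local dir from the user, job id, and task id rather than by appending the full localJobDir path under each configured local dir. The sketch that follows only illustrates the resulting directory layout; the helper name (childLocalDirs) is invented, and "/" stands in for Path.SEPARATOR.

import java.util.StringJoiner;

public class ChildLocalDirsSketch {

  static final String JOBCACHE = "jobcache";
  static final String CLEANUP_SUFFIX = ".cleanup";

  // Layout built by the patched code:
  //   <localDir>/<user>/jobcache/<jobId>/<taskId>[.cleanup]
  // repeated for every configured local dir and joined with commas.
  static String childLocalDirs(String[] localDirs, String user, String jobId,
      String taskId, boolean isCleanup) {
    String taskDir = user + "/" + JOBCACHE + "/" + jobId + "/" + taskId
        + (isCleanup ? CLEANUP_SUFFIX : "");
    StringJoiner joined = new StringJoiner(",");
    for (String dir : localDirs) {
      joined.add(dir + "/" + taskDir);
    }
    return joined.toString();
  }

  public static void main(String[] args) {
    System.out.println(childLocalDirs(
        new String[] {"/tmp/mr-local-1", "/tmp/mr-local-2"},
        "alice", "job_local_0001", "attempt_local_0001_m_000000_0", false));
  }
}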
(Sandy Ryza via + cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java index 95c272d3d25..2bb7dc83655 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java @@ -227,7 +227,7 @@ public class LocalJobRunner implements ClientProtocol { info.getSplitIndex(), 1); map.setUser(UserGroupInformation.getCurrentUser(). getShortUserName()); - setupChildMapredLocalDirs(localJobDir, map, localConf); + setupChildMapredLocalDirs(map, localConf); MapOutputFile mapOutput = new MROutputFiles(); mapOutput.setConf(localConf); @@ -305,7 +305,7 @@ public class LocalJobRunner implements ClientProtocol { reduceId, taskId, mapIds.size(), 1); reduce.setUser(UserGroupInformation.getCurrentUser(). getShortUserName()); - setupChildMapredLocalDirs(localJobDir, reduce, localConf); + setupChildMapredLocalDirs(reduce, localConf); reduce.setLocalMapFiles(mapOutputFiles); if (!Job.this.isInterrupted()) { @@ -958,16 +958,18 @@ public class LocalJobRunner implements ClientProtocol { throw new UnsupportedOperationException("Not supported"); } - static void setupChildMapredLocalDirs(Path localJobDir, Task t, JobConf conf) { + static void setupChildMapredLocalDirs(Task t, JobConf conf) { String[] localDirs = conf.getTrimmedStrings(MRConfig.LOCAL_DIR); + String jobId = t.getJobID().toString(); String taskId = t.getTaskID().toString(); boolean isCleanup = t.isTaskCleanupTask(); + String user = t.getUser(); StringBuffer childMapredLocalDir = new StringBuffer(localDirs[0] + Path.SEPARATOR - + getLocalTaskDir(localJobDir, taskId, isCleanup)); + + getLocalTaskDir(user, jobId, taskId, isCleanup)); for (int i = 1; i < localDirs.length; i++) { childMapredLocalDir.append("," + localDirs[i] + Path.SEPARATOR - + getLocalTaskDir(localJobDir, taskId, isCleanup)); + + getLocalTaskDir(user, jobId, taskId, isCleanup)); } LOG.debug(MRConfig.LOCAL_DIR + " for child : " + childMapredLocalDir); conf.set(MRConfig.LOCAL_DIR, childMapredLocalDir.toString()); @@ -976,9 +978,10 @@ public class LocalJobRunner implements ClientProtocol { static final String TASK_CLEANUP_SUFFIX = ".cleanup"; static final String JOBCACHE = "jobcache"; - static String getLocalTaskDir(Path localJobDir, String taskid, + static String getLocalTaskDir(String user, String jobid, String taskid, boolean isCleanupAttempt) { - String taskDir = localJobDir.toString() + Path.SEPARATOR + taskid; + String taskDir = jobDir + Path.SEPARATOR + user + Path.SEPARATOR + JOBCACHE + + Path.SEPARATOR + jobid + Path.SEPARATOR + taskid; if (isCleanupAttempt) { taskDir = taskDir + TASK_CLEANUP_SUFFIX; } From 740f4cb97a4d5ec498f6e91d91ee7e75ad1c52c2 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Thu, 22 Aug 2013 23:17:12 +0000 Subject: [PATCH 059/153] MAPREDUCE-5476. Changed MR AM recovery code to cleanup staging-directory only after unregistering from the RM. Contributed by Jian He. 
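The MRAppMaster hunks below move addService(createStagingDirCleaningService()) to before the ContainerAllocator. The ordering matters because, as the in-code comment explains, the composite service stops its children in the reverse of the order they were added: the allocator (which unregisters from the RM) now stops before the staging-dir cleaner deletes the staging directory. A minimal, self-contained sketch of that shutdown order follows; it uses invented types (StopOrderSketch, Service) rather than Hadoop's CompositeService.

import java.util.ArrayDeque;
import java.util.Deque;

public class StopOrderSketch {

  interface Service {
    String name();
    void stop();
  }

  static Service named(final String name) {
    return new Service() {
      public String name() { return name; }
      public void stop() { System.out.println("stopping " + name); }
    };
  }

  public static void main(String[] args) {
    // Registration order from the patched MRAppMaster: the staging-dir
    // cleaner is added before the container allocator, and the job history
    // handler is added last.
    Deque<Service> addedOrder = new ArrayDeque<Service>();
    addedOrder.addLast(named("StagingDirCleaningService"));
    addedOrder.addLast(named("ContainerAllocator"));
    addedOrder.addLast(named("JobHistoryEventHandler"));

    // Children stop in reverse order of addition, so the history handler
    // flushes first, the allocator then unregisters from the RM, and only
    // after that is the staging directory cleaned up. This is the order the
    // updated TestStagingCleanup asserts (1, 2, 3).
    while (!addedOrder.isEmpty()) {
      addedOrder.removeLast().stop();
    }
  }
}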
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516660 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 ++ .../hadoop/mapreduce/v2/app/MRAppMaster.java | 31 ++++++----- .../mapreduce/v2/app/TestStagingCleanup.java | 52 +++++++++++++++---- 3 files changed, 64 insertions(+), 22 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 85650bd504a..6de12d2e9c1 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -237,6 +237,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5470. LocalJobRunner does not work on Windows. (Sandy Ryza via cnauroth) + MAPREDUCE-5476. Changed MR AM recovery code to cleanup staging-directory + only after unregistering from the RM. (Jian He via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index ab1c4feadf0..e6df1fcad38 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -325,18 +325,23 @@ public class MRAppMaster extends CompositeService { dispatcher.register(org.apache.hadoop.mapreduce.jobhistory.EventType.class, eater); } - + + if (copyHistory) { + // Now that there's a FINISHING state for application on RM to give AMs + // plenty of time to clean up after unregister it's safe to clean staging + // directory after unregistering with RM. So, we start the staging-dir + // cleaner BEFORE the ContainerAllocator so that on shut-down, + // ContainerAllocator unregisters first and then the staging-dir cleaner + // deletes staging directory. + addService(createStagingDirCleaningService()); + } + // service to allocate containers from RM (if non-uber) or to fake it (uber) containerAllocator = createContainerAllocator(null, context); addIfService(containerAllocator); dispatcher.register(ContainerAllocator.EventType.class, containerAllocator); if (copyHistory) { - // Add the staging directory cleaner before the history server but after - // the container allocator so the staging directory is cleaned after - // the history has been flushed but before unregistering with the RM. - addService(createStagingDirCleaningService()); - // Add the JobHistoryEventHandler last so that it is properly stopped first. // This will guarantee that all history-events are flushed before AM goes // ahead with shutdown. @@ -344,7 +349,6 @@ public class MRAppMaster extends CompositeService { // component creates a JobHistoryEvent in the meanwhile, it will be just be // queued inside the JobHistoryEventHandler addIfService(historyService); - JobHistoryCopyService cpHist = new JobHistoryCopyService(appAttemptID, dispatcher.getEventHandler()); @@ -396,6 +400,14 @@ public class MRAppMaster extends CompositeService { dispatcher.register(Speculator.EventType.class, speculatorEventDispatcher); + // Now that there's a FINISHING state for application on RM to give AMs + // plenty of time to clean up after unregister it's safe to clean staging + // directory after unregistering with RM. 
So, we start the staging-dir + // cleaner BEFORE the ContainerAllocator so that on shut-down, + // ContainerAllocator unregisters first and then the staging-dir cleaner + // deletes staging directory. + addService(createStagingDirCleaningService()); + // service to allocate containers from RM (if non-uber) or to fake it (uber) addIfService(containerAllocator); dispatcher.register(ContainerAllocator.EventType.class, containerAllocator); @@ -405,11 +417,6 @@ public class MRAppMaster extends CompositeService { addIfService(containerLauncher); dispatcher.register(ContainerLauncher.EventType.class, containerLauncher); - // Add the staging directory cleaner before the history server but after - // the container allocator so the staging directory is cleaned after - // the history has been flushed but before unregistering with the RM. - addService(createStagingDirCleaningService()); - // Add the JobHistoryEventHandler last so that it is properly stopped first. // This will guarantee that all history-events are flushed before AM goes // ahead with shutdown. diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index 46aa8984b90..a0c0cb6c35f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -36,6 +36,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; @@ -54,6 +56,7 @@ import org.apache.hadoop.service.Service; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; @@ -279,14 +282,17 @@ import org.junit.Test; } private final class MRAppTestCleanup extends MRApp { - boolean stoppedContainerAllocator; - boolean cleanedBeforeContainerAllocatorStopped; - + int stagingDirCleanedup; + int ContainerAllocatorStopped; + int JobHistoryEventHandlerStopped; + int numStops; public MRAppTestCleanup(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart) { super(maps, reduces, autoComplete, testName, cleanOnStart); - stoppedContainerAllocator = false; - cleanedBeforeContainerAllocatorStopped = false; + stagingDirCleanedup = 0; + ContainerAllocatorStopped = 0; + JobHistoryEventHandlerStopped = 0; + numStops = 0; } @Override @@ -312,6 +318,26 @@ import org.junit.Test; return newJob; } + @Override + protected EventHandler createJobHistoryHandler( + AppContext context) { + return 
new TestJobHistoryEventHandler(context, getStartCount()); + } + + private class TestJobHistoryEventHandler extends JobHistoryEventHandler { + + public TestJobHistoryEventHandler(AppContext context, int startCount) { + super(context, startCount); + } + + @Override + public void serviceStop() throws Exception { + numStops++; + JobHistoryEventHandlerStopped = numStops; + super.serviceStop(); + } + } + @Override protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context) { @@ -334,7 +360,8 @@ import org.junit.Test; @Override protected void serviceStop() throws Exception { - stoppedContainerAllocator = true; + numStops++; + ContainerAllocatorStopped = numStops; super.serviceStop(); } } @@ -346,7 +373,8 @@ import org.junit.Test; @Override public void cleanupStagingDir() throws IOException { - cleanedBeforeContainerAllocatorStopped = !stoppedContainerAllocator; + numStops++; + stagingDirCleanedup = numStops; } @Override @@ -377,11 +405,15 @@ import org.junit.Test; app.verifyCompleted(); int waitTime = 20 * 1000; - while (waitTime > 0 && !app.cleanedBeforeContainerAllocatorStopped) { + while (waitTime > 0 && app.numStops < 3 ) { Thread.sleep(100); waitTime -= 100; } - Assert.assertTrue("Staging directory not cleaned before notifying RM", - app.cleanedBeforeContainerAllocatorStopped); + + // assert JobHistoryEventHandlerStopped first, then + // ContainerAllocatorStopped, and then stagingDirCleanedup + Assert.assertEquals(1, app.JobHistoryEventHandlerStopped); + Assert.assertEquals(2, app.ContainerAllocatorStopped); + Assert.assertEquals(3, app.stagingDirCleanedup); } } From 7cfbfe52f1bc33940dc06c861b0847aad87ef1f2 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 23 Aug 2013 00:55:53 +0000 Subject: [PATCH 060/153] HDFS-5124. DelegationTokenSecretManager#retrievePassword can cause deadlock in NameNode. Contributed by Daryn Sharp. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516671 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../delegation/DelegationTokenSecretManager.java | 15 ++------------- 2 files changed, 5 insertions(+), 13 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0fe0fd84009..538134f5d87 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -383,6 +383,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-4594. WebHDFS open sets Content-Length header to what is specified by length parameter rather than how much data is actually returned. (cnauroth) + HDFS-5124. DelegationTokenSecretManager#retrievePassword can cause deadlock + in NameNode. 
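The DelegationTokenSecretManager hunk below removes the synchronized modifier from retrievePassword and drops the checkAvailableForRead override that acquired the namesystem read lock, replacing both with a plain checkOperation(READ) call. The hazard this avoids is a lock-ordering deadlock: one thread holds the secret manager's monitor and then waits for the namesystem lock, while another thread holds the namesystem lock and calls back into a synchronized method of the secret manager. The sketch below is only a schematic of that pattern with invented names, not the NameNode code.

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class LockOrderSketch {

  private final ReentrantReadWriteLock namesystemLock =
      new ReentrantReadWriteLock();

  // Shape before the fix: the method holds this object's monitor and then
  // blocks on the namesystem lock. If another thread already holds the
  // namesystem write lock and calls back into any synchronized method of
  // this object, the two threads wait on each other forever.
  public synchronized byte[] retrievePasswordBefore(byte[] identifier) {
    namesystemLock.readLock().lock();
    try {
      return identifier.clone();
    } finally {
      namesystemLock.readLock().unlock();
    }
  }

  // Shape after the fix: the availability check no longer takes the
  // namesystem lock, so the two locks are never nested in opposite orders.
  public byte[] retrievePasswordAfter(byte[] identifier) {
    checkReadOperationAllowed();
    return identifier.clone();
  }

  private void checkReadOperationAllowed() {
    // Placeholder for the HA-state check; the point is that it takes no lock.
  }

  public static void main(String[] args) {
    LockOrderSketch sketch = new LockOrderSketch();
    System.out.println(sketch.retrievePasswordAfter(new byte[] {1, 2, 3}).length);
  }
}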
(Daryn Sharp via jing9) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 17e2ccc61ef..25fb25731f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -82,7 +82,7 @@ public class DelegationTokenSecretManager } @Override - public synchronized byte[] retrievePassword( + public byte[] retrievePassword( DelegationTokenIdentifier identifier) throws InvalidToken { try { // this check introduces inconsistency in the authentication to a @@ -91,7 +91,7 @@ public class DelegationTokenSecretManager // different in that a standby may be behind and thus not yet know // of all tokens issued by the active NN. the following check does // not allow ANY token auth, however it should allow known tokens in - checkAvailableForRead(); + namesystem.checkOperation(OperationCategory.READ); } catch (StandbyException se) { // FIXME: this is a hack to get around changing method signatures by // tunneling a non-InvalidToken exception as the cause which the @@ -103,17 +103,6 @@ public class DelegationTokenSecretManager return super.retrievePassword(identifier); } - @Override //SecretManager - public void checkAvailableForRead() throws StandbyException { - namesystem.checkOperation(OperationCategory.READ); - namesystem.readLock(); - try { - namesystem.checkOperation(OperationCategory.READ); - } finally { - namesystem.readUnlock(); - } - } - /** * Returns expiry time of a token given its identifier. * From e588ffe3c09c74c94eb6bfc3071310191fbf9e1f Mon Sep 17 00:00:00 2001 From: Jonathan Turner Eagles Date: Fri, 23 Aug 2013 15:48:43 +0000 Subject: [PATCH 061/153] HDFS-4329. DFSShell issues with directories with spaces in name (Cristina L. Abad via jeagles) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1516904 13f79535-47bb-0310-9956-ffa450edef68 --- .../org/apache/hadoop/fs/shell/PathData.java | 26 +-- .../hadoop/cli/util/CommandExecutor.java | 41 ++-- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../src/test/resources/testHDFSConf.xml | 187 ++++++++++++++++-- 4 files changed, 211 insertions(+), 46 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java index 88a90c6e6ff..84bb2347671 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/PathData.java @@ -106,10 +106,12 @@ public class PathData implements Comparable { /** * Validates the given Windows path. - * Throws IOException on failure. * @param pathString a String of the path suppliued by the user. + * @return true if the URI scheme was not present in the pathString but + * inferred; false, otherwise. 
+ * @throws IOException if anything goes wrong */ - private void ValidateWindowsPath(String pathString) + private static boolean checkIfSchemeInferredFromPath(String pathString) throws IOException { if (windowsNonUriAbsolutePath1.matcher(pathString).find()) { @@ -118,23 +120,21 @@ public class PathData implements Comparable { throw new IOException("Invalid path string " + pathString); } - inferredSchemeFromPath = true; - return; + return true; } // Is it a forward slash-separated absolute path? if (windowsNonUriAbsolutePath2.matcher(pathString).find()) { - inferredSchemeFromPath = true; - return; + return true; } // Does it look like a URI? If so then just leave it alone. if (potentialUri.matcher(pathString).find()) { - return; + return false; } // Looks like a relative path on Windows. - return; + return false; } /** @@ -153,7 +153,7 @@ public class PathData implements Comparable { setStat(stat); if (Path.WINDOWS) { - ValidateWindowsPath(pathString); + inferredSchemeFromPath = checkIfSchemeInferredFromPath(pathString); } } @@ -302,7 +302,7 @@ public class PathData implements Comparable { // check getPath() so scheme slashes aren't considered part of the path String separator = uri.getPath().endsWith(Path.SEPARATOR) ? "" : Path.SEPARATOR; - return uri + separator + basename; + return uriToString(uri, inferredSchemeFromPath) + separator + basename; } protected enum PathType { HAS_SCHEME, SCHEMELESS_ABSOLUTE, RELATIVE }; @@ -356,7 +356,7 @@ public class PathData implements Comparable { if (globUri.getAuthority() == null) { matchUri = removeAuthority(matchUri); } - globMatch = matchUri.toString(); + globMatch = uriToString(matchUri, false); break; case SCHEMELESS_ABSOLUTE: // take just the uri's path globMatch = matchUri.getPath(); @@ -438,6 +438,10 @@ public class PathData implements Comparable { */ @Override public String toString() { + return uriToString(uri, inferredSchemeFromPath); + } + + private static String uriToString(URI uri, boolean inferredSchemeFromPath) { String scheme = uri.getScheme(); // No interpretation of symbols. Just decode % escaped chars. String decodedRemainder = uri.getSchemeSpecificPart(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java index a250e246278..79df284045b 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/util/CommandExecutor.java @@ -24,6 +24,9 @@ import java.io.ByteArrayOutputStream; import java.io.File; import java.io.PrintStream; import java.util.StringTokenizer; +import java.util.regex.Matcher; +import java.util.regex.Pattern; +import java.util.ArrayList; /** * @@ -32,23 +35,31 @@ import java.util.StringTokenizer; public abstract class CommandExecutor { protected String[] getCommandAsArgs(final String cmd, final String masterKey, final String master) { - StringTokenizer tokenizer = new StringTokenizer(cmd, " "); - String[] args = new String[tokenizer.countTokens()]; - - int i = 0; - while (tokenizer.hasMoreTokens()) { - args[i] = tokenizer.nextToken(); + String regex = "\'([^\']*)\'|\"([^\"]*)\"|(\\S+)"; + Matcher matcher = Pattern.compile(regex).matcher(cmd); - args[i] = args[i].replaceAll(masterKey, master); - args[i] = args[i].replaceAll("CLITEST_DATA", - new File(CLITestHelper.TEST_CACHE_DATA_DIR). 
- toURI().toString().replace(' ', '+')); - args[i] = args[i].replaceAll("USERNAME", System.getProperty("user.name")); + ArrayList args = new ArrayList(); + String arg = null; - i++; - } - - return args; + while (matcher.find()) { + if (matcher.group(1) != null) { + arg = matcher.group(1); + } else if (matcher.group(2) != null) { + arg = matcher.group(2); + } else { + arg = matcher.group(3); + } + + arg = arg.replaceAll(masterKey, master); + arg = arg.replaceAll("CLITEST_DATA", + new File(CLITestHelper.TEST_CACHE_DATA_DIR). + toURI().toString().replace(' ', '+')); + arg = arg.replaceAll("USERNAME", System.getProperty("user.name")); + + args.add(arg); + } + + return args.toArray(new String[0]); } public Result executeCommand(final String cmd) throws Exception { diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 538134f5d87..64ea20b06e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -3435,6 +3435,9 @@ Release 0.23.10 - UNRELEASED HDFS-4998. TestUnderReplicatedBlocks fails intermittently (kihwal) + HDFS-4329. DFSShell issues with directories with spaces in name (Cristina + L. Abad via jeagles) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml index 44d2b32f33c..563d51a841d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml @@ -443,6 +443,153 @@ + + ls: whitespaces in an absolute path to a file + + -fs NAMENODE -mkdir -p "/a path with/whitespaces in directories" + -fs NAMENODE -touchz "/a path with/whitespaces in directories/and file names" + -fs NAMENODE -ls "/a path with/whitespaces in directories" + + + -fs NAMENODE -rm -r "/a path with" + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/a path with/whitespaces in directories/and file names + + + + + + ls: whitespaces in a relative path to a file + + -fs NAMENODE -mkdir -p "a path with/whitespaces in directories" + -fs NAMENODE -touchz "a path with/whitespaces in directories/and file names" + -fs NAMENODE -ls "a path with/whitespaces in directories" + + + -fs NAMENODE -rm -r "a path with" + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*a path with/whitespaces in directories/and file names + + + + + + ls: whitespaces in a scheme-qualified path to a file + + -fs NAMENODE -mkdir -p "NAMENODE/a path with/whitespaces in directories" + -fs NAMENODE -touchz "NAMENODE/a path with/whitespaces in directories/and file names" + -fs NAMENODE -ls "NAMENODE/a path with/whitespaces in directories" + + + -fs NAMENODE -rm -r "NAMENODE/a path with" + + + + TokenComparator + Found 1 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/a path with/whitespaces in directories/and file names + + + + + + ls: whitespaces in an absolute path to a file, using globbing + + -fs NAMENODE -mkdir -p "/a path with/whitespaces in directories" + -fs NAMENODE -touchz "/a path with/whitespaces in directories/and file names" + -fs NAMENODE -touchz "/a path with/whitespaces in directories/and 
file names 2" + -fs NAMENODE -ls "/a*/w*" + + + -fs NAMENODE -rm -r "/a path with" + + + + TokenComparator + Found 2 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/a path with/whitespaces in directories/and file names + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*/a path with/whitespaces in directories/and file names 2 + + + + + + ls: whitespaces in a relative path to a file, using globbing + + -fs NAMENODE -mkdir -p "a path with/whitespaces in directories" + -fs NAMENODE -touchz "a path with/whitespaces in directories/and file names" + -fs NAMENODE -touchz "a path with/whitespaces in directories/and file names 2" + -fs NAMENODE -ls "a*/w*" + + + -fs NAMENODE -rm -r "a path with" + + + + TokenComparator + Found 2 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*a path with/whitespaces in directories/and file names + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*a path with/whitespaces in directories/and file names 2 + + + + + + ls: whitespaces in a scheme-qualified path to a file, using globbing + + -fs NAMENODE -mkdir -p "NAMENODE/a path with/whitespaces in directories" + -fs NAMENODE -touchz "NAMENODE/a path with/whitespaces in directories/and file names" + -fs NAMENODE -touchz "NAMENODE/a path with/whitespaces in directories/and file names 2" + -fs NAMENODE -ls "NAMENODE/a*/w*" + + + -fs NAMENODE -rm -r "NAMENODE/a path with" + + + + TokenComparator + Found 2 items + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/a path with/whitespaces in directories/and file names + + + RegexpComparator + ^-rw-r--r--( )*1( )*[a-z]*( )*supergroup( )*0( )*[0-9]{4,}-[0-9]{2,}-[0-9]{2,} [0-9]{2,}:[0-9]{2,}( )*NAMENODE/a path with/whitespaces in directories/and file names 2 + + + + ls: files/directories using absolute path @@ -6503,23 +6650,23 @@ TokenComparator - "data15bytes-15" + data15bytes-15 TokenComparator - "data30bytes-30" + data30bytes-30 TokenComparator - "data60bytes-60" + data60bytes-60 TokenComparator - "data120bytes-120" + data120bytes-120 TokenComparator - "datadir-0" + datadir-0 @@ -6542,23 +6689,23 @@ TokenComparator - "data15bytes-15" + data15bytes-15 TokenComparator - "data30bytes-30" + data30bytes-30 TokenComparator - "data60bytes-60" + data60bytes-60 TokenComparator - "data120bytes-120" + data120bytes-120 TokenComparator - "datadir-0" + datadir-0 @@ -6644,23 +6791,23 @@ TokenComparator - "data15bytes-15" + data15bytes-15 TokenComparator - "data30bytes-30" + data30bytes-30 TokenComparator - "data60bytes-60" + data60bytes-60 TokenComparator - "data120bytes-120" + data120bytes-120 TokenComparator - "datadir-0" + datadir-0 @@ -6731,23 +6878,23 @@ TokenComparator - "data15bytes-15" + data15bytes-15 TokenComparator - "data30bytes-30" + data30bytes-30 TokenComparator - "data60bytes-60" + data60bytes-60 TokenComparator - "data120bytes-120" + data120bytes-120 TokenComparator - "datadir-0" + datadir-0 From 15632cd76f12c2f7df50d0df4865fbe3d8261597 Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Fri, 23 Aug 2013 21:14:43 +0000 Subject: [PATCH 062/153] HDFS-4947 Add NFS server export table to control export by hostname or IP range. 
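This patch keys the export table off the hdfs.nfs.exports.allowed.hosts property (entries separated by ';', each pairing a host spec with an access privilege, default "* rw") and maps each client to an AccessPrivilege of READ_WRITE, READ_ONLY, or NONE. The sketch below handles only the simplest case, exact host names plus a '*' wildcard, and assumes a missing privilege means read-only; the real NfsExports added in this patch also handles IP ranges and cached lookups, which the sketch omits.

import java.util.LinkedHashMap;
import java.util.Map;

public class ExportsSketch {

  enum AccessPrivilege { READ_ONLY, READ_WRITE, NONE }

  private final Map<String, AccessPrivilege> table =
      new LinkedHashMap<String, AccessPrivilege>();

  // Parses a value like "192.168.0.10 rw; nfsgw.example.com; * ro".
  // Entries are separated by ';'; "rw" grants read-write, and this sketch
  // treats anything else as read-only.
  ExportsSketch(String allowedHosts) {
    for (String entry : allowedHosts.split(";")) {
      String[] parts = entry.trim().split("\\s+");
      if (parts[0].isEmpty()) {
        continue;
      }
      AccessPrivilege priv =
          (parts.length > 1 && parts[1].equalsIgnoreCase("rw"))
              ? AccessPrivilege.READ_WRITE : AccessPrivilege.READ_ONLY;
      table.put(parts[0], priv);
    }
  }

  AccessPrivilege getAccessPrivilege(String clientHost) {
    AccessPrivilege priv = table.get(clientHost);
    if (priv != null) {
      return priv;
    }
    // Fall back to a wildcard entry if one was configured, otherwise deny.
    return table.containsKey("*") ? table.get("*") : AccessPrivilege.NONE;
  }

  public static void main(String[] args) {
    ExportsSketch exports = new ExportsSketch("192.168.0.10 rw; * ro");
    System.out.println(exports.getAccessPrivilege("192.168.0.10")); // READ_WRITE
    System.out.println(exports.getAccessPrivilege("10.0.0.5"));     // READ_ONLY
  }
}

In the diff itself, mountd and the NFS3 handlers consult this privilege before serving a request, returning NFS3ERR_ACCES when a client has no access or attempts a write with read-only privilege.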
Contributed by Jing Zhao git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517040 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/nfs/nfs3/Nfs3Constant.java | 12 + .../apache/hadoop/nfs/nfs3/Nfs3Interface.java | 37 +- .../hdfs/nfs/mount/RpcProgramMountd.java | 19 +- .../org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java | 9 +- .../hadoop/hdfs/nfs/nfs3/Nfs3Utils.java | 1 + .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 377 ++++++++++++------ .../hdfs/nfs/security/AccessPrivilege.java | 24 ++ .../hadoop/hdfs/nfs/security/NfsExports.java | 354 ++++++++++++++++ .../hdfs/nfs/security/TestNfsExports.java | 191 +++++++++ hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + 10 files changed, 884 insertions(+), 143 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java index 889a9c9ce1c..1701cc12dd8 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java @@ -189,4 +189,16 @@ public class Nfs3Constant { public final static int CREATE_UNCHECKED = 0; public final static int CREATE_GUARDED = 1; public final static int CREATE_EXCLUSIVE = 2; + + public static final String EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";"; + /** Allowed hosts for nfs exports */ + public static final String EXPORTS_ALLOWED_HOSTS_KEY = "hdfs.nfs.exports.allowed.hosts"; + public static final String EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw"; + /** Size for nfs exports cache */ + public static final String EXPORTS_CACHE_SIZE_KEY = "hdfs.nfs.exports.cache.size"; + public static final int EXPORTS_CACHE_SIZE_DEFAULT = 512; + /** Expiration time for nfs exports cache entry */ + public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "hdfs.nfs.exports.cache.expirytime.millis"; + public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min + } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java index 606feb61325..678631174dd 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.nfs.nfs3; +import java.net.InetAddress; + import org.apache.hadoop.nfs.nfs3.response.NFS3Response; import org.apache.hadoop.oncrpc.RpcAuthSys; import org.apache.hadoop.oncrpc.XDR; @@ -31,53 +33,54 @@ public interface Nfs3Interface { public NFS3Response nullProcedure(); /** GETATTR: Get file attributes */ - public NFS3Response getattr(XDR xdr, RpcAuthSys authSys); + public NFS3Response getattr(XDR xdr, RpcAuthSys authSys, InetAddress client); /** SETATTR: Set file attributes */ - public NFS3Response setattr(XDR xdr, RpcAuthSys authSys); + public NFS3Response setattr(XDR xdr, RpcAuthSys authSys, InetAddress 
client); /** LOOKUP: Lookup filename */ - public NFS3Response lookup(XDR xdr, RpcAuthSys authSys); + public NFS3Response lookup(XDR xdr, RpcAuthSys authSys, InetAddress client); /** ACCESS: Check access permission */ - public NFS3Response access(XDR xdr, RpcAuthSys authSys); + public NFS3Response access(XDR xdr, RpcAuthSys authSys, InetAddress client); /** READ: Read from file */ - public NFS3Response read(XDR xdr, RpcAuthSys authSys); + public NFS3Response read(XDR xdr, RpcAuthSys authSys, InetAddress client); /** WRITE: Write to file */ - public NFS3Response write(XDR xdr, Channel channel, int xid, RpcAuthSys authSys); + public NFS3Response write(XDR xdr, Channel channel, int xid, + RpcAuthSys authSys, InetAddress client); /** CREATE: Create a file */ - public NFS3Response create(XDR xdr, RpcAuthSys authSys); + public NFS3Response create(XDR xdr, RpcAuthSys authSys, InetAddress client); /** MKDIR: Create a directory */ - public NFS3Response mkdir(XDR xdr, RpcAuthSys authSys); + public NFS3Response mkdir(XDR xdr, RpcAuthSys authSys, InetAddress client); /** REMOVE: Remove a file */ - public NFS3Response remove(XDR xdr, RpcAuthSys authSys); + public NFS3Response remove(XDR xdr, RpcAuthSys authSys, InetAddress client); /** RMDIR: Remove a directory */ - public NFS3Response rmdir(XDR xdr, RpcAuthSys authSys); + public NFS3Response rmdir(XDR xdr, RpcAuthSys authSys, InetAddress client); /** RENAME: Rename a file or directory */ - public NFS3Response rename(XDR xdr, RpcAuthSys authSys); + public NFS3Response rename(XDR xdr, RpcAuthSys authSys, InetAddress client); /** SYMLINK: Create a symbolic link */ - public NFS3Response symlink(XDR xdr, RpcAuthSys authSys); + public NFS3Response symlink(XDR xdr, RpcAuthSys authSys, InetAddress client); /** READDIR: Read From directory */ - public NFS3Response readdir(XDR xdr, RpcAuthSys authSys); + public NFS3Response readdir(XDR xdr, RpcAuthSys authSys, InetAddress client); /** FSSTAT: Get dynamic file system information */ - public NFS3Response fsstat(XDR xdr, RpcAuthSys authSys); + public NFS3Response fsstat(XDR xdr, RpcAuthSys authSys, InetAddress client); /** FSINFO: Get static file system information */ - public NFS3Response fsinfo(XDR xdr, RpcAuthSys authSys); + public NFS3Response fsinfo(XDR xdr, RpcAuthSys authSys, InetAddress client); /** PATHCONF: Retrieve POSIX information */ - public NFS3Response pathconf(XDR xdr, RpcAuthSys authSys); + public NFS3Response pathconf(XDR xdr, RpcAuthSys authSys, InetAddress client); /** COMMIT: Commit cached data on a server to stable storage */ - public NFS3Response commit(XDR xdr, RpcAuthSys authSys); + public NFS3Response commit(XDR xdr, RpcAuthSys authSys, InetAddress client); } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java index bf2fe2ae496..cec235c7d4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java @@ -27,6 +27,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege; +import org.apache.hadoop.hdfs.nfs.security.NfsExports; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; 
import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.mount.MountEntry; @@ -59,6 +61,8 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface { /** List that is unmodifiable */ private final List exports; + + private final NfsExports hostsMatcher; public RpcProgramMountd() throws IOException { this(new ArrayList(0)); @@ -72,19 +76,29 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface { throws IOException { // Note that RPC cache is not enabled super("mountd", "localhost", PORT, PROGRAM, VERSION_1, VERSION_3, 0); + + this.hostsMatcher = NfsExports.getInstance(config); this.mounts = Collections.synchronizedList(new ArrayList()); this.exports = Collections.unmodifiableList(exports); this.dfsClient = new DFSClient(NameNode.getAddress(config), config); } + @Override public XDR nullOp(XDR out, int xid, InetAddress client) { if (LOG.isDebugEnabled()) { LOG.debug("MOUNT NULLOP : " + " client: " + client); } - return RpcAcceptedReply.voidReply(out, xid); + return RpcAcceptedReply.voidReply(out, xid); } + @Override public XDR mnt(XDR xdr, XDR out, int xid, InetAddress client) { + AccessPrivilege accessPrivilege = hostsMatcher.getAccessPrivilege(client); + if (accessPrivilege == AccessPrivilege.NONE) { + return MountResponse.writeMNTResponse(Nfs3Status.NFS3ERR_ACCES, out, xid, + null); + } + String path = xdr.readString(); if (LOG.isDebugEnabled()) { LOG.debug("MOUNT MNT path: " + path + " client: " + client); @@ -121,6 +135,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface { return out; } + @Override public XDR dump(XDR out, int xid, InetAddress client) { if (LOG.isDebugEnabled()) { LOG.debug("MOUNT NULLOP : " + " client: " + client); @@ -131,6 +146,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface { return out; } + @Override public XDR umnt(XDR xdr, XDR out, int xid, InetAddress client) { String path = xdr.readString(); if (LOG.isDebugEnabled()) { @@ -143,6 +159,7 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface { return out; } + @Override public XDR umntall(XDR out, int xid, InetAddress client) { if (LOG.isDebugEnabled()) { LOG.debug("MOUNT UMNTALL : " + " client: " + client); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java index 50d846651df..46bc838aa57 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3.java @@ -32,12 +32,17 @@ import org.apache.hadoop.util.StringUtils; * Only TCP server is supported and UDP is not supported. 
*/ public class Nfs3 extends Nfs3Base { + static { + Configuration.addDefaultResource("hdfs-default.xml"); + Configuration.addDefaultResource("hdfs-site.xml"); + } + public Nfs3(List exports) throws IOException { - super(new Mountd(exports), new RpcProgramNfs3(exports)); + super(new Mountd(exports), new RpcProgramNfs3()); } public Nfs3(List exports, Configuration config) throws IOException { - super(new Mountd(exports, config), new RpcProgramNfs3(exports, config)); + super(new Mountd(exports, config), new RpcProgramNfs3(config)); } public static void main(String[] args) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java index fcb4bcf09e5..592106f5aa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/Nfs3Utils.java @@ -88,6 +88,7 @@ public class Nfs3Utils { return new WccAttr(attr.getSize(), attr.getMtime(), attr.getCtime()); } + // TODO: maybe not efficient public static WccData createWccData(final WccAttr preOpAttr, DFSClient dfsClient, final String fileIdPath, final IdUserGroup iug) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index e96b537d1f2..d8694198b95 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -22,22 +22,23 @@ import java.io.IOException; import java.net.InetAddress; import java.nio.ByteBuffer; import java.util.EnumSet; -import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; -import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; -import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.FileSystem.Statistics; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FsStatus; +import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSInputStream; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; +import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege; +import org.apache.hadoop.hdfs.nfs.security.NfsExports; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -125,6 +126,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { private final IdUserGroup iug;// = new IdUserGroup(); private final DFSClientCache clientCache; + private final NfsExports exports; + /** * superUserClient should always impersonate HDFS file system owner to send * requests which requires supergroup privilege. 
This requires the same user @@ -138,17 +141,19 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { private Statistics statistics; private String writeDumpDir; // The dir save dump files - public RpcProgramNfs3(List exports) throws IOException { - this(exports, new Configuration()); + public RpcProgramNfs3() throws IOException { + this(new Configuration()); } - public RpcProgramNfs3(List exports, Configuration config) + public RpcProgramNfs3(Configuration config) throws IOException { super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION, Nfs3Constant.VERSION, 100); config.set(FsPermission.UMASK_LABEL, "000"); iug = new IdUserGroup(); + + exports = NfsExports.getInstance(config); writeManager = new WriteManager(iug, config); clientCache = new DFSClientCache(config); superUserClient = new DFSClient(NameNode.getAddress(config), config); @@ -185,7 +190,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { /****************************************************** * RPC call handlers ******************************************************/ - + + @Override public NFS3Response nullProcedure() { if (LOG.isDebugEnabled()) { LOG.debug("NFS NULL"); @@ -193,8 +199,16 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return new VoidResponse(Nfs3Status.NFS3_OK); } - public GETATTR3Response getattr(XDR xdr, RpcAuthSys authSys) { + @Override + public GETATTR3Response getattr(XDR xdr, RpcAuthSys authSys, + InetAddress client) { GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -267,7 +281,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public SETATTR3Response setattr(XDR xdr, RpcAuthSys authSys) { + @Override + public SETATTR3Response setattr(XDR xdr, RpcAuthSys authSys, + InetAddress client) { SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK); String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); @@ -298,34 +314,39 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } String fileIdPath = Nfs3Utils.getFileIdPath(handle); - WccAttr preOpAttr = null; + Nfs3FileAttributes preOpAttr = null; try { - preOpAttr = Nfs3Utils.getWccAttr(dfsClient, fileIdPath); + preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); if (preOpAttr == null) { LOG.info("Can't get path for fileId:" + handle.getFileId()); response.setStatus(Nfs3Status.NFS3ERR_STALE); return response; } + WccAttr preOpWcc = Nfs3Utils.getWccAttr(preOpAttr); if (request.isCheck()) { if (!preOpAttr.getCtime().equals(request.getCtime())) { - WccData wccData = Nfs3Utils.createWccData(preOpAttr, dfsClient, - fileIdPath, iug); + WccData wccData = new WccData(preOpWcc, preOpAttr); return new SETATTR3Response(Nfs3Status.NFS3ERR_NOT_SYNC, wccData); } } + + // check the write access privilege + if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) { + return new SETATTR3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( + preOpWcc, preOpAttr)); + } setattrInternal(dfsClient, fileIdPath, request.getAttr(), true); Nfs3FileAttributes postOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); - WccData wccData = new WccData(preOpAttr, postOpAttr); + WccData wccData = 
new WccData(preOpWcc, postOpAttr); return new SETATTR3Response(Nfs3Status.NFS3_OK, wccData); - } catch (IOException e) { LOG.warn("Exception ", e); WccData wccData = null; try { - wccData = Nfs3Utils - .createWccData(preOpAttr, dfsClient, fileIdPath, iug); + wccData = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpAttr), + dfsClient, fileIdPath, iug); } catch (IOException e1) { LOG.info("Can't get postOpAttr for fileIdPath: " + fileIdPath); } @@ -337,8 +358,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public LOOKUP3Response lookup(XDR xdr, RpcAuthSys authSys) { + @Override + public LOOKUP3Response lookup(XDR xdr, RpcAuthSys authSys, InetAddress client) { LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -392,8 +420,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public ACCESS3Response access(XDR xdr, RpcAuthSys authSys) { + @Override + public ACCESS3Response access(XDR xdr, RpcAuthSys authSys, InetAddress client) { ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -434,12 +469,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public READLINK3Response readlink(XDR xdr, RpcAuthSys authSys) { + public READLINK3Response readlink(XDR xdr, RpcAuthSys authSys, + InetAddress client) { return new READLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); } - public READ3Response read(XDR xdr, RpcAuthSys authSys) { + @Override + public READ3Response read(XDR xdr, RpcAuthSys authSys, InetAddress client) { READ3Response response = new READ3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -528,8 +571,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } + @Override public WRITE3Response write(XDR xdr, Channel channel, int xid, - RpcAuthSys authSys) { + RpcAuthSys authSys, InetAddress client) { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK); String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); @@ -570,6 +614,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.error("Can't get path for fileId:" + handle.getFileId()); return new WRITE3Response(Nfs3Status.NFS3ERR_STALE); } + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) { + return new WRITE3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( + Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), 0, stableHow, + Nfs3Constant.WRITE_COMMIT_VERF); + } + if (LOG.isDebugEnabled()) { LOG.debug("requesed offset=" + offset + " and current filesize=" + preOpAttr.getSize()); @@ -596,7 +647,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return null; } - public CREATE3Response create(XDR xdr, RpcAuthSys authSys) { + @Override + public CREATE3Response 
create(XDR xdr, RpcAuthSys authSys, InetAddress client) { CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK); String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); @@ -631,16 +683,22 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { HdfsDataOutputStream fos = null; String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); - WccAttr preOpDirAttr = null; + Nfs3FileAttributes preOpDirAttr = null; Nfs3FileAttributes postOpObjAttr = null; FileHandle fileHandle = null; WccData dirWcc = null; try { - preOpDirAttr = Nfs3Utils.getWccAttr(dfsClient, dirFileIdPath); + preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (preOpDirAttr == null) { LOG.error("Can't get path for dirHandle:" + dirHandle); return new CREATE3Response(Nfs3Status.NFS3ERR_STALE); } + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) { + return new CREATE3Response(Nfs3Status.NFS3ERR_ACCES, null, + preOpDirAttr, new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), + preOpDirAttr)); + } String fileIdPath = Nfs3Utils.getFileIdPath(dirHandle) + "/" + fileName; SetAttr3 setAttr3 = request.getObjAttr(); @@ -649,9 +707,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { SetAttrField.MODE) ? new FsPermission((short) setAttr3.getMode()) : FsPermission.getDefault().applyUMask(umask); - EnumSet flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ? EnumSet - .of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : EnumSet - .of(CreateFlag.CREATE); + EnumSet flag = (createMode != Nfs3Constant.CREATE_EXCLUSIVE) ? + EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE) : + EnumSet.of(CreateFlag.CREATE); fos = new HdfsDataOutputStream(dfsClient.create(fileIdPath, permission, flag, false, replication, blockSize, null, bufferSize, null), @@ -668,8 +726,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); - dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, dirFileIdPath, - iug); + dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), + dfsClient, dirFileIdPath, iug); } catch (IOException e) { LOG.error("Exception", e); if (fos != null) { @@ -682,8 +740,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } if (dirWcc == null) { try { - dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); + dirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(preOpDirAttr), + dfsClient, dirFileIdPath, iug); } catch (IOException e1) { LOG.error("Can't get postOpDirAttr for dirFileId:" + dirHandle.getFileId()); @@ -712,7 +770,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { dirWcc); } - public MKDIR3Response mkdir(XDR xdr, RpcAuthSys authSys) { + @Override + public MKDIR3Response mkdir(XDR xdr, RpcAuthSys authSys, InetAddress client) { MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK); String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); @@ -739,17 +798,22 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); - WccAttr preOpDirAttr = null; + Nfs3FileAttributes preOpDirAttr = null; Nfs3FileAttributes postOpDirAttr = null; Nfs3FileAttributes postOpObjAttr = null; FileHandle objFileHandle = null; try { - preOpDirAttr = Nfs3Utils.getWccAttr(dfsClient, dirFileIdPath); + preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, 
dirFileIdPath, iug); if (preOpDirAttr == null) { LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId()); return new MKDIR3Response(Nfs3Status.NFS3ERR_STALE); } + if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) { + return new MKDIR3Response(Nfs3Status.NFS3ERR_ACCES, null, preOpDirAttr, + new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), preOpDirAttr)); + } + final String fileIdPath = dirFileIdPath + "/" + fileName; SetAttr3 setAttr3 = request.getObjAttr(); FsPermission permission = setAttr3.getUpdateFields().contains( @@ -757,8 +821,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { : FsPermission.getDefault().applyUMask(umask); if (!dfsClient.mkdirs(fileIdPath, permission, false)) { - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); + WccData dirWcc = Nfs3Utils.createWccData( + Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); return new MKDIR3Response(Nfs3Status.NFS3ERR_IO, null, null, dirWcc); } @@ -771,8 +835,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { postOpObjAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); objFileHandle = new FileHandle(postOpObjAttr.getFileId()); - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); + WccData dirWcc = Nfs3Utils.createWccData( + Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); return new MKDIR3Response(Nfs3Status.NFS3_OK, new FileHandle( postOpObjAttr.getFileId()), postOpObjAttr, dirWcc); } catch (IOException e) { @@ -785,7 +849,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath); } } - WccData dirWcc = new WccData(preOpDirAttr, postOpDirAttr); + WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), + postOpDirAttr); if (e instanceof AccessControlException) { return new MKDIR3Response(Nfs3Status.NFS3ERR_PERM, objFileHandle, postOpObjAttr, dirWcc); @@ -796,12 +861,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - - public READDIR3Response mknod(XDR xdr, RpcAuthSys authSys) { + public READDIR3Response mknod(XDR xdr, RpcAuthSys authSys, InetAddress client) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } - public REMOVE3Response remove(XDR xdr, RpcAuthSys authSys) { + @Override + public REMOVE3Response remove(XDR xdr, RpcAuthSys authSys, InetAddress client) { REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK); String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); @@ -825,10 +890,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); - WccAttr preOpDirAttr = null; + Nfs3FileAttributes preOpDirAttr = null; Nfs3FileAttributes postOpDirAttr = null; try { - preOpDirAttr = Nfs3Utils.getWccAttr(dfsClient, dirFileIdPath); + preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (preOpDirAttr == null) { LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId()); return new REMOVE3Response(Nfs3Status.NFS3ERR_STALE); @@ -838,24 +903,23 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath); if (fstat == null) { - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); + WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), + 
preOpDirAttr); return new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT, dirWcc); } if (fstat.isDir()) { - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); + WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), + preOpDirAttr); return new REMOVE3Response(Nfs3Status.NFS3ERR_ISDIR, dirWcc); } - if (dfsClient.delete(fileIdPath, false) == false) { - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); + boolean result = dfsClient.delete(fileIdPath, false); + WccData dirWcc = Nfs3Utils.createWccData( + Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); + + if (!result) { return new REMOVE3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc); } - - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); return new REMOVE3Response(Nfs3Status.NFS3_OK, dirWcc); } catch (IOException e) { LOG.warn("Exception ", e); @@ -867,7 +931,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath); } } - WccData dirWcc = new WccData(preOpDirAttr, postOpDirAttr); + WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), + postOpDirAttr); if (e instanceof AccessControlException) { return new REMOVE3Response(Nfs3Status.NFS3ERR_PERM, dirWcc); } else { @@ -876,7 +941,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public RMDIR3Response rmdir(XDR xdr, RpcAuthSys authSys) { + @Override + public RMDIR3Response rmdir(XDR xdr, RpcAuthSys authSys, InetAddress client) { RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK); String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); @@ -901,45 +967,43 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } String dirFileIdPath = Nfs3Utils.getFileIdPath(dirHandle); - WccAttr preOpDirAttr = null; + Nfs3FileAttributes preOpDirAttr = null; Nfs3FileAttributes postOpDirAttr = null; try { - preOpDirAttr = Nfs3Utils.getWccAttr(dfsClient, dirFileIdPath); + preOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); if (preOpDirAttr == null) { LOG.info("Can't get path for dir fileId:" + dirHandle.getFileId()); return new RMDIR3Response(Nfs3Status.NFS3ERR_STALE); } + + WccData errWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), + preOpDirAttr); + if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) { + return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, errWcc); + } String fileIdPath = dirFileIdPath + "/" + fileName; HdfsFileStatus fstat = Nfs3Utils.getFileStatus(dfsClient, fileIdPath); if (fstat == null) { - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); - return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, dirWcc); + return new RMDIR3Response(Nfs3Status.NFS3ERR_NOENT, errWcc); } if (!fstat.isDir()) { - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); - return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, dirWcc); + return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTDIR, errWcc); } if (fstat.getChildrenNum() > 0) { - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); - return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, dirWcc); + return new RMDIR3Response(Nfs3Status.NFS3ERR_NOTEMPTY, errWcc); } - if (dfsClient.delete(fileIdPath, false) == false) { - WccData dirWcc = Nfs3Utils.createWccData(preOpDirAttr, dfsClient, - dirFileIdPath, iug); + 
boolean result = dfsClient.delete(fileIdPath, false); + WccData dirWcc = Nfs3Utils.createWccData( + Nfs3Utils.getWccAttr(preOpDirAttr), dfsClient, dirFileIdPath, iug); + if (!result) { return new RMDIR3Response(Nfs3Status.NFS3ERR_ACCES, dirWcc); } - postOpDirAttr = Nfs3Utils.getFileAttr(dfsClient, dirFileIdPath, iug); - WccData wccData = new WccData(preOpDirAttr, postOpDirAttr); - return new RMDIR3Response(Nfs3Status.NFS3_OK, wccData); - + return new RMDIR3Response(Nfs3Status.NFS3_OK, dirWcc); } catch (IOException e) { LOG.warn("Exception ", e); // Try to return correct WccData @@ -950,7 +1014,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.info("Can't get postOpDirAttr for " + dirFileIdPath); } } - WccData dirWcc = new WccData(preOpDirAttr, postOpDirAttr); + WccData dirWcc = new WccData(Nfs3Utils.getWccAttr(preOpDirAttr), + postOpDirAttr); if (e instanceof AccessControlException) { return new RMDIR3Response(Nfs3Status.NFS3ERR_PERM, dirWcc); } else { @@ -959,7 +1024,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public RENAME3Response rename(XDR xdr, RpcAuthSys authSys) { + @Override + public RENAME3Response rename(XDR xdr, RpcAuthSys authSys, InetAddress client) { RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK); String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); @@ -987,23 +1053,31 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { String fromDirFileIdPath = Nfs3Utils.getFileIdPath(fromHandle); String toDirFileIdPath = Nfs3Utils.getFileIdPath(toHandle); - WccAttr fromPreOpAttr = null; - WccAttr toPreOpAttr = null; + Nfs3FileAttributes fromPreOpAttr = null; + Nfs3FileAttributes toPreOpAttr = null; WccData fromDirWcc = null; WccData toDirWcc = null; try { - fromPreOpAttr = Nfs3Utils.getWccAttr(dfsClient, fromDirFileIdPath); + fromPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, fromDirFileIdPath, iug); if (fromPreOpAttr == null) { LOG.info("Can't get path for fromHandle fileId:" + fromHandle.getFileId()); return new RENAME3Response(Nfs3Status.NFS3ERR_STALE); } - toPreOpAttr = Nfs3Utils.getWccAttr(dfsClient, toDirFileIdPath); + toPreOpAttr = Nfs3Utils.getFileAttr(dfsClient, toDirFileIdPath, iug); if (toPreOpAttr == null) { LOG.info("Can't get path for toHandle fileId:" + toHandle.getFileId()); return new RENAME3Response(Nfs3Status.NFS3ERR_STALE); } + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) { + WccData fromWcc = new WccData(Nfs3Utils.getWccAttr(fromPreOpAttr), + fromPreOpAttr); + WccData toWcc = new WccData(Nfs3Utils.getWccAttr(toPreOpAttr), + toPreOpAttr); + return new RENAME3Response(Nfs3Status.NFS3ERR_ACCES, fromWcc, toWcc); + } String src = fromDirFileIdPath + "/" + fromName; String dst = toDirFileIdPath + "/" + toName; @@ -1011,20 +1085,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { dfsClient.rename(src, dst, Options.Rename.NONE); // Assemble the reply - fromDirWcc = Nfs3Utils.createWccData(fromPreOpAttr, dfsClient, - fromDirFileIdPath, iug); - toDirWcc = Nfs3Utils.createWccData(toPreOpAttr, dfsClient, - toDirFileIdPath, iug); + fromDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(fromPreOpAttr), + dfsClient, fromDirFileIdPath, iug); + toDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(toPreOpAttr), + dfsClient, toDirFileIdPath, iug); return new RENAME3Response(Nfs3Status.NFS3_OK, fromDirWcc, toDirWcc); - } catch (IOException e) { LOG.warn("Exception ", e); // Try to return 
correct WccData try { - fromDirWcc = Nfs3Utils.createWccData(fromPreOpAttr, dfsClient, - fromDirFileIdPath, iug); - toDirWcc = Nfs3Utils.createWccData(toPreOpAttr, dfsClient, - toDirFileIdPath, iug); + fromDirWcc = Nfs3Utils.createWccData( + Nfs3Utils.getWccAttr(fromPreOpAttr), dfsClient, fromDirFileIdPath, + iug); + toDirWcc = Nfs3Utils.createWccData(Nfs3Utils.getWccAttr(toPreOpAttr), + dfsClient, toDirFileIdPath, iug); } catch (IOException e1) { LOG.info("Can't get postOpDirAttr for " + fromDirFileIdPath + " or" + toDirFileIdPath); @@ -1038,16 +1112,25 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public SYMLINK3Response symlink(XDR xdr, RpcAuthSys authSys) { + public SYMLINK3Response symlink(XDR xdr, RpcAuthSys authSys, + InetAddress client) { return new SYMLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); } - public READDIR3Response link(XDR xdr, RpcAuthSys authSys) { + public READDIR3Response link(XDR xdr, RpcAuthSys authSys, InetAddress client) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } - public READDIR3Response readdir(XDR xdr, RpcAuthSys authSys) { + @Override + public READDIR3Response readdir(XDR xdr, RpcAuthSys authSys, + InetAddress client) { READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -1180,7 +1263,12 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { dirStatus.getModificationTime(), dirList); } - public READDIRPLUS3Response readdirplus(XDR xdr, RpcAuthSys authSys) { + public READDIRPLUS3Response readdirplus(XDR xdr, RpcAuthSys authSys, + InetAddress client) { + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES); + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -1325,8 +1413,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { dirStatus.getModificationTime(), dirListPlus); } - public FSSTAT3Response fsstat(XDR xdr, RpcAuthSys authSys) { + @Override + public FSSTAT3Response fsstat(XDR xdr, RpcAuthSys authSys, InetAddress client) { FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -1376,8 +1471,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public FSINFO3Response fsinfo(XDR xdr, RpcAuthSys authSys) { + @Override + public FSINFO3Response fsinfo(XDR xdr, RpcAuthSys authSys, InetAddress client) { FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -1421,8 +1523,16 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public PATHCONF3Response pathconf(XDR xdr, RpcAuthSys authSys) { + @Override + public PATHCONF3Response pathconf(XDR xdr, RpcAuthSys authSys, + InetAddress 
client) { PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK); + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { + response.setStatus(Nfs3Status.NFS3ERR_ACCES); + return response; + } + String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); if (dfsClient == null) { @@ -1461,7 +1571,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public COMMIT3Response commit(XDR xdr, RpcAuthSys authSys) { + @Override + public COMMIT3Response commit(XDR xdr, RpcAuthSys authSys, InetAddress client) { COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK); String uname = authSysCheck(authSys); DFSClient dfsClient = clientCache.get(uname); @@ -1486,13 +1597,20 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } String fileIdPath = Nfs3Utils.getFileIdPath(handle); - WccAttr preOpAttr = null; + Nfs3FileAttributes preOpAttr = null; try { - preOpAttr = Nfs3Utils.getWccAttr(dfsClient, fileIdPath); + preOpAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); if (preOpAttr == null) { LOG.info("Can't get path for fileId:" + handle.getFileId()); return new COMMIT3Response(Nfs3Status.NFS3ERR_STALE); } + + if (!checkAccessPrivilege(client, AccessPrivilege.READ_WRITE)) { + return new COMMIT3Response(Nfs3Status.NFS3ERR_ACCES, new WccData( + Nfs3Utils.getWccAttr(preOpAttr), preOpAttr), + Nfs3Constant.WRITE_COMMIT_VERF); + } + long commitOffset = (request.getCount() == 0) ? 0 : (request.getOffset() + request.getCount()); @@ -1504,7 +1622,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } Nfs3FileAttributes postOpAttr = writeManager.getFileAttr(dfsClient, handle, iug); - WccData fileWcc = new WccData(preOpAttr, postOpAttr); + WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr); return new COMMIT3Response(status, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF); @@ -1516,7 +1634,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } catch (IOException e1) { LOG.info("Can't get postOpAttr for fileId: " + handle.getFileId()); } - WccData fileWcc = new WccData(preOpAttr, postOpAttr); + WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), postOpAttr); return new COMMIT3Response(Nfs3Status.NFS3ERR_IO, fileWcc, Nfs3Constant.WRITE_COMMIT_VERF); } @@ -1554,47 +1672,47 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { if (nfsproc3 == NFSPROC3.NULL) { response = nullProcedure(); } else if (nfsproc3 == NFSPROC3.GETATTR) { - response = getattr(xdr, authSys); + response = getattr(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.SETATTR) { - response = setattr(xdr, authSys); + response = setattr(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.LOOKUP) { - response = lookup(xdr, authSys); + response = lookup(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.ACCESS) { - response = access(xdr, authSys); + response = access(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.READLINK) { - response = readlink(xdr, authSys); + response = readlink(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.READ) { - response = read(xdr, authSys); + response = read(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.WRITE) { - response = write(xdr, channel, xid, authSys); + response = write(xdr, channel, xid, authSys, client); } else if (nfsproc3 == NFSPROC3.CREATE) { - response = create(xdr, authSys); + response = create(xdr, authSys, client); } else if (nfsproc3 == 
NFSPROC3.MKDIR) { - response = mkdir(xdr, authSys); + response = mkdir(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.SYMLINK) { - response = symlink(xdr, authSys); + response = symlink(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.MKNOD) { - response = mknod(xdr, authSys); + response = mknod(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.REMOVE) { - response = remove(xdr, authSys); + response = remove(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.RMDIR) { - response = rmdir(xdr, authSys); + response = rmdir(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.RENAME) { - response = rename(xdr, authSys); + response = rename(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.LINK) { - response = link(xdr, authSys); + response = link(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.READDIR) { - response = readdir(xdr, authSys); + response = readdir(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.READDIRPLUS) { - response = readdirplus(xdr, authSys); + response = readdirplus(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.FSSTAT) { - response = fsstat(xdr, authSys); + response = fsstat(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.FSINFO) { - response = fsinfo(xdr, authSys); + response = fsinfo(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.PATHCONF) { - response = pathconf(xdr, authSys); + response = pathconf(xdr, authSys, client); } else if (nfsproc3 == NFSPROC3.COMMIT) { - response = commit(xdr, authSys); + response = commit(xdr, authSys, client); } else { // Invalid procedure RpcAcceptedReply.voidReply(out, xid, @@ -1611,4 +1729,17 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(call.getProcedure()); return nfsproc3 == null || nfsproc3.isIdempotent(); } + + private boolean checkAccessPrivilege(final InetAddress client, + final AccessPrivilege expected) { + AccessPrivilege access = exports.getAccessPrivilege(client); + if (access == AccessPrivilege.NONE) { + return false; + } + if (access == AccessPrivilege.READ_ONLY + && expected == AccessPrivilege.READ_WRITE) { + return false; + } + return true; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java new file mode 100644 index 00000000000..43a0d001f26 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java @@ -0,0 +1,24 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hdfs.nfs.security; + +public enum AccessPrivilege { + READ_ONLY, + READ_WRITE, + NONE; +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java new file mode 100644 index 00000000000..ad194e9e2d3 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java @@ -0,0 +1,354 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.nfs.security; + +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.List; +import java.util.regex.Pattern; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.commons.net.util.SubnetUtils; +import org.apache.commons.net.util.SubnetUtils.SubnetInfo; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.nfs.nfs3.Nfs3Constant; +import org.apache.hadoop.util.LightWeightCache; +import org.apache.hadoop.util.LightWeightGSet; +import org.apache.hadoop.util.LightWeightGSet.LinkedElement; + +import com.google.common.base.Preconditions; + +/** + * This class provides functionality for loading and checking the mapping + * between client hosts and their access privileges. 
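+ *
+ * A minimal usage sketch (illustrative only; the host values and the
+ * clientAddress variable are hypothetical, while the constant and the two
+ * calls are the ones defined in this patch):
+ * <pre>
+ *   // "rw" grants read-write; an entry without an option keeps the default
+ *   // read-only privilege; entries are separated by the configured
+ *   // separator (';' in the tests below)
+ *   conf.set(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY,
+ *       "192.168.0.0/22 rw ; host1.example.com");
+ *   AccessPrivilege access =
+ *       NfsExports.getInstance(conf).getAccessPrivilege(clientAddress);
+ * </pre>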
+ */ +public class NfsExports { + + private static NfsExports exports = null; + + public static synchronized NfsExports getInstance(Configuration conf) { + if (exports == null) { + String matchHosts = conf.get(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY, + Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT); + int cacheSize = conf.getInt(Nfs3Constant.EXPORTS_CACHE_SIZE_KEY, + Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT); + long expirationPeriodNano = conf.getLong( + Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, + Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000; + exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts); + } + return exports; + } + + public static final Log LOG = LogFactory.getLog(NfsExports.class); + + // only support IPv4 now + private static final String IP_ADDRESS = + "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})"; + private static final String SLASH_FORMAT_SHORT = IP_ADDRESS + "/(\\d{1,3})"; + private static final String SLASH_FORMAT_LONG = IP_ADDRESS + "/" + IP_ADDRESS; + + private static final Pattern CIDR_FORMAT_SHORT = + Pattern.compile(SLASH_FORMAT_SHORT); + + private static final Pattern CIDR_FORMAT_LONG = + Pattern.compile(SLASH_FORMAT_LONG); + + static class AccessCacheEntry implements LightWeightCache.Entry{ + private final String hostAddr; + private AccessPrivilege access; + private final long expirationTime; + + private LightWeightGSet.LinkedElement next; + + AccessCacheEntry(String hostAddr, AccessPrivilege access, + long expirationTime) { + Preconditions.checkArgument(hostAddr != null); + this.hostAddr = hostAddr; + this.access = access; + this.expirationTime = expirationTime; + } + + @Override + public int hashCode() { + return hostAddr.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj instanceof AccessCacheEntry) { + AccessCacheEntry entry = (AccessCacheEntry) obj; + return this.hostAddr.equals(entry.hostAddr); + } + return false; + } + + @Override + public void setNext(LinkedElement next) { + this.next = next; + } + + @Override + public LinkedElement getNext() { + return this.next; + } + + @Override + public void setExpirationTime(long timeNano) { + // we set expiration time in the constructor, and the expiration time + // does not change + } + + @Override + public long getExpirationTime() { + return this.expirationTime; + } + } + + private final List mMatches; + + private final LightWeightCache accessCache; + private final long cacheExpirationPeriod; + + /** + * Constructor. + * @param cacheSize The size of the access privilege cache. + * @param expirationPeriodNano The period + * @param matchingHosts A string specifying one or multiple matchers. 
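+ * (Inferred from the constructor body and getAccessPrivilege(): the
+ * expiration period is given in nanoseconds and bounds how long a cached
+ * per-client access decision is reused before it is re-evaluated against
+ * the matchers.)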
+ */ + NfsExports(int cacheSize, long expirationPeriodNano, String matchHosts) { + this.cacheExpirationPeriod = expirationPeriodNano; + accessCache = new LightWeightCache( + cacheSize, cacheSize, expirationPeriodNano, 0); + String[] matchStrings = matchHosts.split( + Nfs3Constant.EXPORTS_ALLOWED_HOSTS_SEPARATOR); + mMatches = new ArrayList(matchStrings.length); + for(String mStr : matchStrings) { + if (LOG.isDebugEnabled()) { + LOG.debug("Processing match string '" + mStr + "'"); + } + mStr = mStr.trim(); + if(!mStr.isEmpty()) { + mMatches.add(getMatch(mStr)); + } + } + } + + public AccessPrivilege getAccessPrivilege(InetAddress addr) { + return getAccessPrivilege(addr.getHostAddress(), + addr.getCanonicalHostName()); + } + + AccessPrivilege getAccessPrivilege(String address, String hostname) { + long now = System.nanoTime(); + AccessCacheEntry newEntry = new AccessCacheEntry(address, + AccessPrivilege.NONE, now + this.cacheExpirationPeriod); + // check if there is a cache entry for the given address + AccessCacheEntry cachedEntry = accessCache.get(newEntry); + if (cachedEntry != null && now < cachedEntry.expirationTime) { + // get a non-expired cache entry, use it + return cachedEntry.access; + } else { + for(Match match : mMatches) { + if(match.isIncluded(address, hostname)) { + if (match.accessPrivilege == AccessPrivilege.READ_ONLY) { + newEntry.access = AccessPrivilege.READ_ONLY; + break; + } else if (match.accessPrivilege == AccessPrivilege.READ_WRITE) { + newEntry.access = AccessPrivilege.READ_WRITE; + } + } + } + accessCache.put(newEntry); + return newEntry.access; + } + } + + private static abstract class Match { + private final AccessPrivilege accessPrivilege; + + private Match(AccessPrivilege accessPrivilege) { + this.accessPrivilege = accessPrivilege; + } + + public abstract boolean isIncluded(String address, String hostname); + } + + /** + * Matcher covering all client hosts (specified by "*") + */ + private static class AnonymousMatch extends Match { + private AnonymousMatch(AccessPrivilege accessPrivilege) { + super(accessPrivilege); + } + + @Override + public boolean isIncluded(String ip, String hostname) { + return true; + } + } + + /** + * Matcher using CIDR for client host matching + */ + private static class CIDRMatch extends Match { + private final SubnetInfo subnetInfo; + + private CIDRMatch(AccessPrivilege accessPrivilege, SubnetInfo subnetInfo) { + super(accessPrivilege); + this.subnetInfo = subnetInfo; + } + + @Override + public boolean isIncluded(String address, String hostname) { + if(subnetInfo.isInRange(address)) { + if(LOG.isDebugEnabled()) { + LOG.debug("CIDRNMatcher low = " + subnetInfo.getLowAddress() + + ", high = " + subnetInfo.getHighAddress() + + ", allowing client '" + address + "', '" + hostname + "'"); + } + return true; + } + if(LOG.isDebugEnabled()) { + LOG.debug("CIDRNMatcher low = " + subnetInfo.getLowAddress() + + ", high = " + subnetInfo.getHighAddress() + + ", denying client '" + address + "', '" + hostname + "'"); + } + return false; + } + } + + /** + * Matcher requiring exact string match for client host + */ + private static class ExactMatch extends Match { + private final String ipOrHost; + + private ExactMatch(AccessPrivilege accessPrivilege, String ipOrHost) { + super(accessPrivilege); + this.ipOrHost = ipOrHost; + } + + @Override + public boolean isIncluded(String address, String hostname) { + if(ipOrHost.equalsIgnoreCase(address) || + ipOrHost.equalsIgnoreCase(hostname)) { + if(LOG.isDebugEnabled()) { + LOG.debug("ExactMatcher '" + 
ipOrHost + "', allowing client " + + "'" + address + "', '" + hostname + "'"); + } + return true; + } + if(LOG.isDebugEnabled()) { + LOG.debug("ExactMatcher '" + ipOrHost + "', denying client " + + "'" + address + "', '" + hostname + "'"); + } + return false; + } + } + + /** + * Matcher where client hosts are specified by regular expression + */ + private static class RegexMatch extends Match { + private final Pattern pattern; + + private RegexMatch(AccessPrivilege accessPrivilege, String wildcard) { + super(accessPrivilege); + this.pattern = Pattern.compile(wildcard, Pattern.CASE_INSENSITIVE); + } + + @Override + public boolean isIncluded(String address, String hostname) { + if (pattern.matcher(address).matches() + || pattern.matcher(hostname).matches()) { + if (LOG.isDebugEnabled()) { + LOG.debug("RegexMatcher '" + pattern.pattern() + + "', allowing client '" + address + "', '" + hostname + "'"); + } + return true; + } + if (LOG.isDebugEnabled()) { + LOG.debug("RegexMatcher '" + pattern.pattern() + + "', denying client '" + address + "', '" + hostname + "'"); + } + return false; + } + } + + /** + * Loading a matcher from a string. The default access privilege is read-only. + * The string contains 1 or 2 parts, separated by whitespace characters, where + * the first part specifies the client hosts, and the second part (if + * existent) specifies the access privilege of the client hosts. I.e., + * + * "client-hosts [access-privilege]" + */ + private static Match getMatch(String line) { + String[] parts = line.split("\\s+"); + final String host; + AccessPrivilege privilege = AccessPrivilege.READ_ONLY; + switch (parts.length) { + case 1: + host = parts[0].toLowerCase().trim(); + break; + case 2: + host = parts[0].toLowerCase().trim(); + String option = parts[1].trim(); + if ("rw".equalsIgnoreCase(option)) { + privilege = AccessPrivilege.READ_WRITE; + } + break; + default: + throw new IllegalArgumentException("Incorrectly formatted line '" + line + + "'"); + } + if (host.equals("*")) { + if (LOG.isDebugEnabled()) { + LOG.debug("Using match all for '" + host + "' and " + privilege); + } + return new AnonymousMatch(privilege); + } else if (CIDR_FORMAT_SHORT.matcher(host).matches()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Using CIDR match for '" + host + "' and " + privilege); + } + return new CIDRMatch(privilege, new SubnetUtils(host).getInfo()); + } else if (CIDR_FORMAT_LONG.matcher(host).matches()) { + if (LOG.isDebugEnabled()) { + LOG.debug("Using CIDR match for '" + host + "' and " + privilege); + } + String[] pair = host.split("/"); + return new CIDRMatch(privilege, + new SubnetUtils(pair[0], pair[1]).getInfo()); + } else if (host.contains("*") || host.contains("?") || host.contains("[") + || host.contains("]")) { + if (LOG.isDebugEnabled()) { + LOG.debug("Using Regex match for '" + host + "' and " + privilege); + } + return new RegexMatch(privilege, host); + } + if (LOG.isDebugEnabled()) { + LOG.debug("Using exact match for '" + host + "' and " + privilege); + } + return new ExactMatch(privilege, host); + } +} \ No newline at end of file diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java new file mode 100644 index 00000000000..9448e18632e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java @@ -0,0 +1,191 @@ +/** + * Licensed to the Apache 
Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.nfs.security; + +import junit.framework.Assert; + +import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege; +import org.apache.hadoop.hdfs.nfs.security.NfsExports; +import org.apache.hadoop.nfs.nfs3.Nfs3Constant; +import org.junit.Test; + +public class TestNfsExports { + + private final String address1 = "192.168.0.1"; + private final String address2 = "10.0.0.1"; + private final String hostname1 = "a.b.com"; + private final String hostname2 = "a.b.org"; + + private static final long ExpirationPeriod = + Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT * 1000 * 1000; + + private static final int CacheSize = Nfs3Constant.EXPORTS_CACHE_SIZE_DEFAULT; + + @Test + public void testWildcardRW() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, "* rw"); + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address1, hostname1)); + } + + @Test + public void testWildcardRO() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, "* ro"); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname1)); + } + + @Test + public void testExactAddressRW() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, address1 + + " rw"); + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertFalse(AccessPrivilege.READ_WRITE == matcher + .getAccessPrivilege(address2, hostname1)); + } + + @Test + public void testExactAddressRO() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, address1); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertEquals(AccessPrivilege.NONE, + matcher.getAccessPrivilege(address2, hostname1)); + } + + @Test + public void testExactHostRW() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, hostname1 + + " rw"); + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address1, hostname1)); + } + + @Test + public void testExactHostRO() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, hostname1); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname1)); + } + + @Test + public void testCidrShortRW() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, + "192.168.0.0/22 rw"); + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertEquals(AccessPrivilege.NONE, + matcher.getAccessPrivilege(address2, hostname1)); + } + + @Test + public void testCidrShortRO() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, + "192.168.0.0/22"); + 
Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertEquals(AccessPrivilege.NONE, + matcher.getAccessPrivilege(address2, hostname1)); + } + + @Test + public void testCidrLongRW() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, + "192.168.0.0/255.255.252.0 rw"); + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertEquals(AccessPrivilege.NONE, + matcher.getAccessPrivilege(address2, hostname1)); + } + + @Test + public void testCidrLongRO() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, + "192.168.0.0/255.255.252.0"); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertEquals(AccessPrivilege.NONE, + matcher.getAccessPrivilege(address2, hostname1)); + } + + @Test + public void testRegexIPRW() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, + "192.168.0.[0-9]+ rw"); + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertEquals(AccessPrivilege.NONE, + matcher.getAccessPrivilege(address2, hostname1)); + } + + @Test + public void testRegexIPRO() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, + "192.168.0.[0-9]+"); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertEquals(AccessPrivilege.NONE, + matcher.getAccessPrivilege(address2, hostname1)); + } + + @Test + public void testRegexHostRW() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, + "[a-z]+.b.com rw"); + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address1, hostname1)); + // address1 will hit the cache + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address1, hostname2)); + } + + @Test + public void testRegexHostRO() { + NfsExports matcher = new NfsExports(CacheSize, ExpirationPeriod, + "[a-z]+.b.com"); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname1)); + // address1 will hit the cache + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname2)); + } + + @Test + public void testMultiMatchers() throws Exception { + long shortExpirationPeriod = 1 * 1000 * 1000 * 1000; // 1s + NfsExports matcher = new NfsExports(CacheSize, shortExpirationPeriod, + "192.168.0.[0-9]+;[a-z]+.b.com rw"); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname2)); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, address1)); + Assert.assertEquals(AccessPrivilege.READ_ONLY, + matcher.getAccessPrivilege(address1, hostname1)); + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address2, hostname1)); + // address2 will hit the cache + Assert.assertEquals(AccessPrivilege.READ_WRITE, + matcher.getAccessPrivilege(address2, hostname2)); + + Thread.sleep(1000); + // no cache for address2 now + Assert.assertEquals(AccessPrivilege.NONE, + matcher.getAccessPrivilege(address2, address2)); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 64ea20b06e6..644699f67f9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -304,6 +304,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5069 Include hadoop-nfs 
and hadoop-hdfs-nfs into hadoop dist for NFS deployment (brandonli) + HDFS-4947 Add NFS server export table to control export by hostname or + IP range (Jing Zhao via brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 9ee38f3a841aa8c0ed68b54d8c0306c25e9c21bb Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Fri, 23 Aug 2013 21:23:10 +0000 Subject: [PATCH 063/153] MAPREDUCE-5478. TeraInputFormat unnecessarily defines its own FileSplit subclass (Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517046 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 ++ .../examples/terasort/TeraInputFormat.java | 49 +------------------ .../examples/terasort/TeraScheduler.java | 6 +-- 3 files changed, 7 insertions(+), 51 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 6de12d2e9c1..5769d926e67 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -181,6 +181,9 @@ Release 2.1.1-beta - UNRELEASED IMPROVEMENTS + MAPREDUCE-5478. TeraInputFormat unnecessarily defines its own FileSplit + subclass (Sandy Ryza) + OPTIMIZATIONS MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java index f957ad9e4f6..88b12dd1ff4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraInputFormat.java @@ -60,48 +60,6 @@ public class TeraInputFormat extends FileInputFormat { private static MRJobConfig lastContext = null; private static List lastResult = null; - static class TeraFileSplit extends FileSplit { - static private String[] ZERO_LOCATIONS = new String[0]; - - private String[] locations; - - public TeraFileSplit() { - locations = ZERO_LOCATIONS; - } - public TeraFileSplit(Path file, long start, long length, String[] hosts) { - super(file, start, length, hosts); - try { - locations = super.getLocations(); - } catch (IOException e) { - locations = ZERO_LOCATIONS; - } - } - - // XXXXXX should this also be null-protected? 
- protected void setLocations(String[] hosts) { - locations = hosts; - } - - @Override - public String[] getLocations() { - return locations; - } - - public String toString() { - StringBuffer result = new StringBuffer(); - result.append(getPath()); - result.append(" from "); - result.append(getStart()); - result.append(" length "); - result.append(getLength()); - for(String host: getLocations()) { - result.append(" "); - result.append(host); - } - return result.toString(); - } - } - static class TextSampler implements IndexedSortable { private ArrayList records = new ArrayList(); @@ -325,11 +283,6 @@ public class TeraInputFormat extends FileInputFormat { return new TeraRecordReader(); } - protected FileSplit makeSplit(Path file, long start, long length, - String[] hosts) { - return new TeraFileSplit(file, start, length, hosts); - } - @Override public List getSplits(JobContext job) throws IOException { if (job == lastContext) { @@ -343,7 +296,7 @@ public class TeraInputFormat extends FileInputFormat { System.out.println("Spent " + (t2 - t1) + "ms computing base-splits."); if (job.getConfiguration().getBoolean(TeraScheduler.USE, true)) { TeraScheduler scheduler = new TeraScheduler( - lastResult.toArray(new TeraFileSplit[0]), job.getConfiguration()); + lastResult.toArray(new FileSplit[0]), job.getConfiguration()); lastResult = scheduler.getNewFileSplits(); t3 = System.currentTimeMillis(); System.out.println("Spent " + (t3 - t2) + "ms computing TeraScheduler splits."); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java index 82a451246c7..7095dd7d28e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/terasort/TeraScheduler.java @@ -24,7 +24,6 @@ import java.util.*; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.examples.terasort.TeraInputFormat.TeraFileSplit; import org.apache.hadoop.mapreduce.InputSplit; import org.apache.hadoop.mapreduce.lib.input.FileSplit; import org.apache.hadoop.mapreduce.server.tasktracker.TTConfig; @@ -214,8 +213,9 @@ class TeraScheduler { for(int i=0; i < splits.length; ++i) { if (splits[i].isAssigned) { // copy the split and fix up the locations - ((TeraFileSplit) realSplits[i]).setLocations - (new String[]{splits[i].locations.get(0).hostname}); + String[] newLocations = {splits[i].locations.get(0).hostname}; + realSplits[i] = new FileSplit(realSplits[i].getPath(), + realSplits[i].getStart(), realSplits[i].getLength(), newLocations); result[left++] = realSplits[i]; } else { result[right--] = realSplits[i]; From f1638fdf94733ceb7ff716b48175875e70064646 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Fri, 23 Aug 2013 22:11:54 +0000 Subject: [PATCH 064/153] YARN-707. Add user info in the YARN ClientToken. 
Contributed by Vinod Kumar Vavilapalli git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517073 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 + .../client/ClientToAMTokenIdentifier.java | 15 ++++- .../rmapp/attempt/RMAppAttemptImpl.java | 2 +- .../recovery/TestRMStateStore.java | 2 +- .../security/TestClientToAMTokens.java | 64 ++++++++++++++----- 5 files changed, 63 insertions(+), 22 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 04ad1a27a00..ab61b8c181e 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -43,6 +43,8 @@ Release 2.1.1-beta - UNRELEASED YARN-589. Expose a REST API for monitoring the fair scheduler (Sandy Ryza). + YARN-707. Add user info in the YARN ClientToken (vinodkv via jlowe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java index d9c576eead3..22497386601 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java @@ -39,6 +39,7 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { public static final Text KIND_NAME = new Text("YARN_CLIENT_TOKEN"); private ApplicationAttemptId applicationAttemptId; + private Text applicationSubmitter = new Text(); // TODO: Add more information in the tokenID such that it is not // transferrable, more secure etc. 
@@ -46,21 +47,27 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { public ClientToAMTokenIdentifier() { } - public ClientToAMTokenIdentifier(ApplicationAttemptId id) { + public ClientToAMTokenIdentifier(ApplicationAttemptId id, String appSubmitter) { this(); this.applicationAttemptId = id; + this.applicationSubmitter = new Text(appSubmitter); } public ApplicationAttemptId getApplicationAttemptID() { return this.applicationAttemptId; } + public String getApplicationSubmitter() { + return this.applicationSubmitter.toString(); + } + @Override public void write(DataOutput out) throws IOException { out.writeLong(this.applicationAttemptId.getApplicationId() .getClusterTimestamp()); out.writeInt(this.applicationAttemptId.getApplicationId().getId()); out.writeInt(this.applicationAttemptId.getAttemptId()); + this.applicationSubmitter.write(out); } @Override @@ -68,6 +75,7 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { this.applicationAttemptId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(in.readLong(), in.readInt()), in.readInt()); + this.applicationSubmitter.readFields(in); } @Override @@ -77,10 +85,11 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { @Override public UserGroupInformation getUser() { - if (this.applicationAttemptId == null) { + if (this.applicationSubmitter == null) { return null; } - return UserGroupInformation.createRemoteUser(this.applicationAttemptId.toString()); + return UserGroupInformation.createRemoteUser(this.applicationSubmitter + .toString()); } @InterfaceAudience.Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 1543110db03..048002456d0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -722,7 +722,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { // create clientToAMToken appAttempt.clientToAMToken = new Token(new ClientToAMTokenIdentifier( - appAttempt.applicationAttemptId), + appAttempt.applicationAttemptId, appAttempt.user), appAttempt.rmContext.getClientToAMTokenSecretManager()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java index 05916129e3b..98319522ff8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java @@ -367,7 +367,7 @@ public class TestRMStateStore { 
appToken.setService(new Text("appToken service")); ClientToAMTokenIdentifier clientToAMTokenId = - new ClientToAMTokenIdentifier(attemptId); + new ClientToAMTokenIdentifier(attemptId, "user"); clientToAMTokenMgr.registerApplication(attemptId); Token clientToAMToken = new Token(clientToAMTokenId, clientToAMTokenMgr); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java index fc2fda85202..6f68804fdcf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java @@ -115,7 +115,6 @@ public class TestClientToAMTokens { private final byte[] secretKey; private InetSocketAddress address; private boolean pinged = false; - private ClientToAMTokenSecretManager secretManager; public CustomAM(ApplicationAttemptId appId, byte[] secretKey) { super("CustomAM"); @@ -132,12 +131,14 @@ public class TestClientToAMTokens { protected void serviceStart() throws Exception { Configuration conf = getConfig(); - secretManager = new ClientToAMTokenSecretManager(this.appAttemptId, secretKey); Server server; try { server = - new RPC.Builder(conf).setProtocol(CustomProtocol.class) - .setNumHandlers(1).setSecretManager(secretManager) + new RPC.Builder(conf) + .setProtocol(CustomProtocol.class) + .setNumHandlers(1) + .setSecretManager( + new ClientToAMTokenSecretManager(this.appAttemptId, secretKey)) .setInstance(this).build(); } catch (Exception e) { throw new YarnRuntimeException(e); @@ -146,14 +147,10 @@ public class TestClientToAMTokens { this.address = NetUtils.getConnectAddress(server); super.serviceStart(); } - - public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() { - return this.secretManager; - } } @Test - public void testClientToAMs() throws Exception { + public void testClientToAMTokenss() throws Exception { final Configuration conf = new Configuration(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, @@ -204,7 +201,7 @@ public class TestClientToAMTokens { GetApplicationReportResponse reportResponse = rm.getClientRMService().getApplicationReport(request); ApplicationReport appReport = reportResponse.getApplicationReport(); - org.apache.hadoop.yarn.api.records.Token clientToAMToken = + org.apache.hadoop.yarn.api.records.Token originalClientToAMToken = appReport.getClientToAMToken(); ApplicationAttemptId appAttempt = app.getCurrentAppAttempt().getAppAttemptId(); @@ -259,17 +256,47 @@ public class TestClientToAMTokens { Assert.assertFalse(am.pinged); } - // Verify denial for a malicious user - UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); Token token = - ConverterUtils.convertFromYarn(clientToAMToken, am.address); + ConverterUtils.convertFromYarn(originalClientToAMToken, am.address); + // Verify denial for a malicious user with tampered ID + verifyTokenWithTamperedID(conf, am, token); + + // Verify denial for a malicious user with tampered user-name + verifyTokenWithTamperedUserName(conf, am, token); + + // Now for 
an authenticated user + verifyValidToken(conf, am, token); + } + + private void verifyTokenWithTamperedID(final Configuration conf, + final CustomAM am, Token token) + throws IOException { // Malicious user, messes with appId + UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); ClientToAMTokenIdentifier maliciousID = new ClientToAMTokenIdentifier(BuilderUtils.newApplicationAttemptId( - BuilderUtils.newApplicationId(app.getApplicationId() - .getClusterTimestamp(), 42), 43)); + BuilderUtils.newApplicationId(am.appAttemptId.getApplicationId() + .getClusterTimestamp(), 42), 43), UserGroupInformation + .getCurrentUser().getShortUserName()); + verifyTamperedToken(conf, am, token, ugi, maliciousID); + } + + private void verifyTokenWithTamperedUserName(final Configuration conf, + final CustomAM am, Token token) + throws IOException { + // Malicious user, messes with appId + UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); + ClientToAMTokenIdentifier maliciousID = + new ClientToAMTokenIdentifier(am.appAttemptId, "evilOrc"); + + verifyTamperedToken(conf, am, token, ugi, maliciousID); + } + + private void verifyTamperedToken(final Configuration conf, final CustomAM am, + Token token, UserGroupInformation ugi, + ClientToAMTokenIdentifier maliciousID) { Token maliciousToken = new Token(maliciousID.getBytes(), token.getPassword(), token.getKind(), @@ -309,8 +336,12 @@ public class TestClientToAMTokens { + "Mismatched response.")); Assert.assertFalse(am.pinged); } + } - // Now for an authenticated user + private void verifyValidToken(final Configuration conf, final CustomAM am, + Token token) throws IOException, + InterruptedException { + UserGroupInformation ugi; ugi = UserGroupInformation.createRemoteUser("me"); ugi.addToken(token); @@ -326,5 +357,4 @@ public class TestClientToAMTokens { } }); } - } From d912eea82221ec9851a4dab3a6473a05a4b8dded Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Fri, 23 Aug 2013 22:46:08 +0000 Subject: [PATCH 065/153] YARN-905. Add state filters to nodes CLI (Wei Yan via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517083 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 + .../hadoop/yarn/client/cli/NodeCLI.java | 49 +++- .../hadoop/yarn/client/cli/TestYarnCLI.java | 234 ++++++++++++++++-- 3 files changed, 264 insertions(+), 21 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ab61b8c181e..eba17467d78 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -23,6 +23,8 @@ Release 2.3.0 - UNRELEASED IMPROVEMENTS + YARN-905. 
Add state filters to nodes CLI (Wei Yan via Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java index 16e80dd93d7..afe3287b5f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java @@ -21,11 +21,14 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintWriter; import java.util.Date; +import java.util.HashSet; import java.util.List; +import java.util.Set; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.lang.time.DateFormatUtils; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -40,9 +43,12 @@ import org.apache.hadoop.yarn.util.ConverterUtils; @Private @Unstable public class NodeCLI extends YarnCLI { - private static final String NODES_PATTERN = "%16s\t%10s\t%17s\t%18s" + + private static final String NODES_PATTERN = "%16s\t%15s\t%17s\t%18s" + System.getProperty("line.separator"); + private static final String NODE_STATE_CMD = "states"; + private static final String NODE_ALL = "all"; + public static void main(String[] args) throws Exception { NodeCLI cli = new NodeCLI(); cli.setSysOutPrintStream(System.out); @@ -57,7 +63,18 @@ public class NodeCLI extends YarnCLI { Options opts = new Options(); opts.addOption(STATUS_CMD, true, "Prints the status report of the node."); - opts.addOption(LIST_CMD, false, "Lists all the nodes in the RUNNING state."); + opts.addOption(LIST_CMD, false, "List all running nodes. 
" + + "Supports optional use of --states to filter nodes " + + "based on node state, all --all to list all nodes."); + Option nodeStateOpt = new Option(NODE_STATE_CMD, true, + "Works with -list to filter nodes based on their states."); + nodeStateOpt.setValueSeparator(','); + nodeStateOpt.setArgs(Option.UNLIMITED_VALUES); + nodeStateOpt.setArgName("Comma-separated list of node states"); + opts.addOption(nodeStateOpt); + Option allOpt = new Option(NODE_ALL, false, + "Works with -list to list all nodes."); + opts.addOption(allOpt); CommandLine cliParser = new GnuParser().parse(opts, args); int exitCode = -1; @@ -68,7 +85,24 @@ public class NodeCLI extends YarnCLI { } printNodeStatus(cliParser.getOptionValue("status")); } else if (cliParser.hasOption("list")) { - listClusterNodes(); + Set nodeStates = new HashSet(); + if (cliParser.hasOption(NODE_ALL)) { + for (NodeState state : NodeState.values()) { + nodeStates.add(state); + } + } else if (cliParser.hasOption(NODE_STATE_CMD)) { + String[] types = cliParser.getOptionValues(NODE_STATE_CMD); + if (types != null) { + for (String type : types) { + if (!type.trim().isEmpty()) { + nodeStates.add(NodeState.valueOf(type.trim().toUpperCase())); + } + } + } + } else { + nodeStates.add(NodeState.RUNNING); + } + listClusterNodes(nodeStates); } else { syserr.println("Invalid Command Usage : "); printUsage(opts); @@ -86,14 +120,17 @@ public class NodeCLI extends YarnCLI { } /** - * Lists all the nodes present in the cluster + * Lists the nodes matching the given node states * + * @param nodeStates * @throws YarnException * @throws IOException */ - private void listClusterNodes() throws YarnException, IOException { + private void listClusterNodes(Set nodeStates) + throws YarnException, IOException { PrintWriter writer = new PrintWriter(sysout); - List nodesReport = client.getNodeReports(NodeState.RUNNING); + List nodesReport = client.getNodeReports( + nodeStates.toArray(new NodeState[0])); writer.println("Total Nodes:" + nodesReport.size()); writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address", "Running-Containers"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 8be8b68e491..c33ddea38b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -363,36 +363,239 @@ public class TestYarnCLI { @Test public void testListClusterNodes() throws Exception { + List nodeReports = new ArrayList(); + nodeReports.addAll(getNodeReports(1, NodeState.NEW)); + nodeReports.addAll(getNodeReports(2, NodeState.RUNNING)); + nodeReports.addAll(getNodeReports(1, NodeState.UNHEALTHY)); + nodeReports.addAll(getNodeReports(1, NodeState.DECOMMISSIONED)); + nodeReports.addAll(getNodeReports(1, NodeState.REBOOTED)); + nodeReports.addAll(getNodeReports(1, NodeState.LOST)); + NodeCLI cli = new NodeCLI(); - when(client.getNodeReports(NodeState.RUNNING)).thenReturn( - getNodeReports(3)); cli.setClient(client); cli.setSysOutPrintStream(sysOut); - int result = cli.run(new String[] { "-list" }); + + Set nodeStates = new HashSet(); + nodeStates.add(NodeState.NEW); + NodeState[] states = nodeStates.toArray(new NodeState[0]); + when(client.getNodeReports(states)) + 
.thenReturn(getNodeReports(nodeReports, nodeStates)); + int result = cli.run(new String[] { "-list", "--states", "NEW" }); assertEquals(0, result); - verify(client).getNodeReports(NodeState.RUNNING); + verify(client).getNodeReports(states); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); - pw.println("Total Nodes:3"); - pw.print(" Node-Id\tNode-State\tNode-Http-Address\t"); + pw.println("Total Nodes:1"); + pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); pw.println("Running-Containers"); - pw.print(" host0:0\t RUNNING\t host1:8888"); - pw.println("\t 0"); - pw.print(" host1:0\t RUNNING\t host1:8888"); - pw.println("\t 0"); - pw.print(" host2:0\t RUNNING\t host1:8888"); + pw.print(" host0:0\t NEW\t host1:8888"); pw.println("\t 0"); pw.close(); String nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt()); + + sysOutStream.reset(); + nodeStates.clear(); + nodeStates.add(NodeState.RUNNING); + states = nodeStates.toArray(new NodeState[0]); + when(client.getNodeReports(states)) + .thenReturn(getNodeReports(nodeReports, nodeStates)); + result = cli.run(new String[] { "-list", "--states", "RUNNING" }); + assertEquals(0, result); + verify(client).getNodeReports(states); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total Nodes:2"); + pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); + pw.println("Running-Containers"); + pw.print(" host0:0\t RUNNING\t host1:8888"); + pw.println("\t 0"); + pw.print(" host1:0\t RUNNING\t host1:8888"); + pw.println("\t 0"); + pw.close(); + nodesReportStr = baos.toString("UTF-8"); + Assert.assertEquals(nodesReportStr, sysOutStream.toString()); + verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt()); + + sysOutStream.reset(); + result = cli.run(new String[] { "-list" }); + assertEquals(0, result); + Assert.assertEquals(nodesReportStr, sysOutStream.toString()); + verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt()); + + sysOutStream.reset(); + nodeStates.clear(); + nodeStates.add(NodeState.UNHEALTHY); + states = nodeStates.toArray(new NodeState[0]); + when(client.getNodeReports(states)) + .thenReturn(getNodeReports(nodeReports, nodeStates)); + result = cli.run(new String[] { "-list", "--states", "UNHEALTHY" }); + assertEquals(0, result); + verify(client).getNodeReports(states); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total Nodes:1"); + pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); + pw.println("Running-Containers"); + pw.print(" host0:0\t UNHEALTHY\t host1:8888"); + pw.println("\t 0"); + pw.close(); + nodesReportStr = baos.toString("UTF-8"); + Assert.assertEquals(nodesReportStr, sysOutStream.toString()); + verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt()); + + sysOutStream.reset(); + nodeStates.clear(); + nodeStates.add(NodeState.DECOMMISSIONED); + states = nodeStates.toArray(new NodeState[0]); + when(client.getNodeReports(states)) + .thenReturn(getNodeReports(nodeReports, nodeStates)); + result = cli.run(new String[] { "-list", "--states", "DECOMMISSIONED" }); + assertEquals(0, result); + verify(client).getNodeReports(states); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total Nodes:1"); + pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); + pw.println("Running-Containers"); + pw.print(" host0:0\t 
DECOMMISSIONED\t host1:8888"); + pw.println("\t 0"); + pw.close(); + nodesReportStr = baos.toString("UTF-8"); + Assert.assertEquals(nodesReportStr, sysOutStream.toString()); + verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt()); + + sysOutStream.reset(); + nodeStates.clear(); + nodeStates.add(NodeState.REBOOTED); + states = nodeStates.toArray(new NodeState[0]); + when(client.getNodeReports(states)) + .thenReturn(getNodeReports(nodeReports, nodeStates)); + result = cli.run(new String[] { "-list", "--states", "REBOOTED" }); + assertEquals(0, result); + verify(client).getNodeReports(states); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total Nodes:1"); + pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); + pw.println("Running-Containers"); + pw.print(" host0:0\t REBOOTED\t host1:8888"); + pw.println("\t 0"); + pw.close(); + nodesReportStr = baos.toString("UTF-8"); + Assert.assertEquals(nodesReportStr, sysOutStream.toString()); + verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt()); + + sysOutStream.reset(); + nodeStates.clear(); + nodeStates.add(NodeState.LOST); + states = nodeStates.toArray(new NodeState[0]); + when(client.getNodeReports(states)) + .thenReturn(getNodeReports(nodeReports, nodeStates)); + result = cli.run(new String[] { "-list", "--states", "LOST" }); + assertEquals(0, result); + verify(client).getNodeReports(states); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total Nodes:1"); + pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); + pw.println("Running-Containers"); + pw.print(" host0:0\t LOST\t host1:8888"); + pw.println("\t 0"); + pw.close(); + nodesReportStr = baos.toString("UTF-8"); + Assert.assertEquals(nodesReportStr, sysOutStream.toString()); + verify(sysOut, times(7)).write(any(byte[].class), anyInt(), anyInt()); + + sysOutStream.reset(); + nodeStates.clear(); + nodeStates.add(NodeState.NEW); + nodeStates.add(NodeState.RUNNING); + nodeStates.add(NodeState.LOST); + nodeStates.add(NodeState.REBOOTED); + states = nodeStates.toArray(new NodeState[0]); + when(client.getNodeReports(states)) + .thenReturn(getNodeReports(nodeReports, nodeStates)); + result = cli.run(new String[] { "-list", "--states", + "NEW,RUNNING,LOST,REBOOTED" }); + assertEquals(0, result); + verify(client).getNodeReports(states); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total Nodes:5"); + pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); + pw.println("Running-Containers"); + pw.print(" host0:0\t NEW\t host1:8888"); + pw.println("\t 0"); + pw.print(" host0:0\t RUNNING\t host1:8888"); + pw.println("\t 0"); + pw.print(" host1:0\t RUNNING\t host1:8888"); + pw.println("\t 0"); + pw.print(" host0:0\t REBOOTED\t host1:8888"); + pw.println("\t 0"); + pw.print(" host0:0\t LOST\t host1:8888"); + pw.println("\t 0"); + pw.close(); + nodesReportStr = baos.toString("UTF-8"); + Assert.assertEquals(nodesReportStr, sysOutStream.toString()); + verify(sysOut, times(8)).write(any(byte[].class), anyInt(), anyInt()); + + sysOutStream.reset(); + nodeStates.clear(); + for (NodeState s : NodeState.values()) { + nodeStates.add(s); + } + states = nodeStates.toArray(new NodeState[0]); + when(client.getNodeReports(states)) + .thenReturn(getNodeReports(nodeReports, nodeStates)); + result = cli.run(new String[] { "-list", "--all" }); + assertEquals(0, result); + verify(client).getNodeReports(states); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + 
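    // For illustration, a minimal sketch (not part of this test) of driving the new
    // filters directly; "yarnClient" is assumed to be an already-initialized YarnClient.
    NodeCLI nodeCli = new NodeCLI();
    nodeCli.setClient(yarnClient);
    nodeCli.setSysOutPrintStream(System.out);
    // --states is split on commas and mapped to NodeState values before the query.
    nodeCli.run(new String[] { "-list", "--states", "NEW,RUNNING" });
    // --all expands to every NodeState, so no node is filtered out.
    nodeCli.run(new String[] { "-list", "--all" });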
pw.println("Total Nodes:7"); + pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); + pw.println("Running-Containers"); + pw.print(" host0:0\t NEW\t host1:8888"); + pw.println("\t 0"); + pw.print(" host0:0\t RUNNING\t host1:8888"); + pw.println("\t 0"); + pw.print(" host1:0\t RUNNING\t host1:8888"); + pw.println("\t 0"); + pw.print(" host0:0\t UNHEALTHY\t host1:8888"); + pw.println("\t 0"); + pw.print(" host0:0\t DECOMMISSIONED\t host1:8888"); + pw.println("\t 0"); + pw.print(" host0:0\t REBOOTED\t host1:8888"); + pw.println("\t 0"); + pw.print(" host0:0\t LOST\t host1:8888"); + pw.println("\t 0"); + pw.close(); + nodesReportStr = baos.toString("UTF-8"); + Assert.assertEquals(nodesReportStr, sysOutStream.toString()); + verify(sysOut, times(9)).write(any(byte[].class), anyInt(), anyInt()); + } + + private List getNodeReports( + List nodeReports, + Set nodeStates) { + List reports = new ArrayList(); + + for (NodeReport nodeReport : nodeReports) { + if (nodeStates.contains(nodeReport.getNodeState())) { + reports.add(nodeReport); + } + } + return reports; } @Test public void testNodeStatus() throws Exception { NodeId nodeId = NodeId.newInstance("host0", 0); NodeCLI cli = new NodeCLI(); - when(client.getNodeReports()).thenReturn(getNodeReports(3)); + when(client.getNodeReports()).thenReturn( + getNodeReports(3, NodeState.RUNNING)); cli.setClient(client); cli.setSysOutPrintStream(sysOut); cli.setSysErrPrintStream(sysErr); @@ -424,7 +627,8 @@ public class TestYarnCLI { public void testAbsentNodeStatus() throws Exception { NodeId nodeId = NodeId.newInstance("Absenthost0", 0); NodeCLI cli = new NodeCLI(); - when(client.getNodeReports()).thenReturn(getNodeReports(0)); + when(client.getNodeReports()).thenReturn( + getNodeReports(0, NodeState.RUNNING)); cli.setClient(client); cli.setSysOutPrintStream(sysOut); cli.setSysErrPrintStream(sysErr); @@ -452,12 +656,12 @@ public class TestYarnCLI { verify(sysErr).println("Invalid Command Usage : "); } - private List getNodeReports(int noOfNodes) { + private List getNodeReports(int noOfNodes, NodeState state) { List nodeReports = new ArrayList(); for (int i = 0; i < noOfNodes; i++) { NodeReport nodeReport = NodeReport.newInstance(NodeId - .newInstance("host" + i, 0), NodeState.RUNNING, "host" + 1 + ":8888", + .newInstance("host" + i, 0), state, "host" + 1 + ":8888", "rack1", Records.newRecord(Resource.class), Records .newRecord(Resource.class), 0, "", 0); nodeReports.add(nodeReport); From 6f93f205156a8da010038821a8ba40a352d2ba6c Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Fri, 23 Aug 2013 22:47:37 +0000 Subject: [PATCH 066/153] MAPREDUCE-5475. MRClientService does not verify ACLs properly. Contributed by Jason Lowe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517085 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 + .../v2/app/client/MRClientService.java | 64 +++++++++----- .../mapreduce/v2/app/TestMRClientService.java | 85 +++++++++++++++++++ 3 files changed, 129 insertions(+), 22 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5769d926e67..387952af62f 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -243,6 +243,8 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5476. Changed MR AM recovery code to cleanup staging-directory only after unregistering from the RM. (Jian He via vinodkv) + MAPREDUCE-5475. 
MRClientService does not verify ACLs properly (jlowe) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 4bb39696e1e..d36bf62fdf0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; @@ -78,6 +79,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider; import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -175,16 +178,22 @@ public class MRClientService extends AbstractService return getBindAddress(); } - private Job verifyAndGetJob(JobId jobID, - boolean modifyAccess) throws IOException { + private Job verifyAndGetJob(JobId jobID, + JobACL accessType) throws IOException { Job job = appContext.getJob(jobID); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + if (!job.checkAccess(ugi, accessType)) { + throw new AccessControlException("User " + ugi.getShortUserName() + + " cannot perform operation " + accessType.name() + " on " + + jobID); + } return job; } private Task verifyAndGetTask(TaskId taskID, - boolean modifyAccess) throws IOException { + JobACL accessType) throws IOException { Task task = verifyAndGetJob(taskID.getJobId(), - modifyAccess).getTask(taskID); + accessType).getTask(taskID); if (task == null) { throw new IOException("Unknown Task " + taskID); } @@ -192,9 +201,9 @@ public class MRClientService extends AbstractService } private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID, - boolean modifyAccess) throws IOException { + JobACL accessType) throws IOException { TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(), - modifyAccess).getAttempt(attemptID); + accessType).getAttempt(attemptID); if (attempt == null) { throw new IOException("Unknown TaskAttempt " + attemptID); } @@ -205,7 +214,7 @@ public class MRClientService extends AbstractService public GetCountersResponse getCounters(GetCountersRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class); response.setCounters(TypeConverter.toYarn(job.getAllCounters())); @@ -216,7 +225,7 @@ 
public class MRClientService extends AbstractService public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class); if (job != null) { @@ -235,7 +244,7 @@ public class MRClientService extends AbstractService GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class); response.setTaskAttemptReport( - verifyAndGetAttempt(taskAttemptId, false).getReport()); + verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getReport()); return response; } @@ -245,7 +254,8 @@ public class MRClientService extends AbstractService TaskId taskId = request.getTaskId(); GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class); - response.setTaskReport(verifyAndGetTask(taskId, false).getReport()); + response.setTaskReport( + verifyAndGetTask(taskId, JobACL.VIEW_JOB).getReport()); return response; } @@ -256,7 +266,7 @@ public class MRClientService extends AbstractService JobId jobId = request.getJobId(); int fromEventId = request.getFromEventId(); int maxEvents = request.getMaxEvents(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class); @@ -270,9 +280,11 @@ public class MRClientService extends AbstractService public KillJobResponse killJob(KillJobRequest request) throws IOException { JobId jobId = request.getJobId(); - String message = "Kill Job received from client " + jobId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill job " + jobId + " received from " + callerUGI + + " at " + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetJob(jobId, true); + verifyAndGetJob(jobId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new JobDiagnosticsUpdateEvent(jobId, message)); appContext.getEventHandler().handle( @@ -287,9 +299,11 @@ public class MRClientService extends AbstractService public KillTaskResponse killTask(KillTaskRequest request) throws IOException { TaskId taskId = request.getTaskId(); - String message = "Kill task received from client " + taskId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill task " + taskId + " received from " + callerUGI + + " at " + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetTask(taskId, true); + verifyAndGetTask(taskId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskEvent(taskId, TaskEventType.T_KILL)); KillTaskResponse response = @@ -302,9 +316,12 @@ public class MRClientService extends AbstractService public KillTaskAttemptResponse killTaskAttempt( KillTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - String message = "Kill task attempt received from client " + taskAttemptId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill task attempt " + taskAttemptId + + " received from " + callerUGI + " at " + + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetAttempt(taskAttemptId, true); + verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new 
TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -322,8 +339,8 @@ public class MRClientService extends AbstractService GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class); - response.addAllDiagnostics( - verifyAndGetAttempt(taskAttemptId, false).getDiagnostics()); + response.addAllDiagnostics(verifyAndGetAttempt(taskAttemptId, + JobACL.VIEW_JOB).getDiagnostics()); return response; } @@ -332,9 +349,12 @@ public class MRClientService extends AbstractService public FailTaskAttemptResponse failTaskAttempt( FailTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - String message = "Fail task attempt received from client " + taskAttemptId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Fail task attempt " + taskAttemptId + + " received from " + callerUGI + " at " + + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetAttempt(taskAttemptId, true); + verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -356,7 +376,7 @@ public class MRClientService extends AbstractService GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); Collection tasks = job.getTasks(taskType).values(); LOG.info("Getting task report for " + taskType + " " + jobId + ". Report-size will be " + tasks.size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java index 34b8dc76354..b17b8ce7adc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java @@ -18,13 +18,20 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.junit.Assert.fail; + +import java.security.PrivilegedExceptionAction; import java.util.Iterator; import java.util.List; import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.JobACL; +import org.apache.hadoop.mapreduce.MRConfig; +import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; @@ -32,6 +39,9 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompleti import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest; +import 
org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; @@ -51,6 +61,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -169,6 +181,79 @@ public class TestMRClientService { app.waitForState(job, JobState.SUCCEEDED); } + @Test + public void testViewAclOnlyCannotModify() throws Exception { + final MRAppWithClientService app = new MRAppWithClientService(1, 0, false); + final Configuration conf = new Configuration(); + conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); + conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser"); + Job job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size()); + Iterator it = job.getTasks().values().iterator(); + Task task = it.next(); + app.waitForState(task, TaskState.RUNNING); + TaskAttempt attempt = task.getAttempts().values().iterator().next(); + app.waitForState(attempt, TaskAttemptState.RUNNING); + + UserGroupInformation viewOnlyUser = + UserGroupInformation.createUserForTesting( + "viewonlyuser", new String[] {}); + Assert.assertTrue("viewonlyuser cannot view job", + job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB)); + Assert.assertFalse("viewonlyuser can modify job", + job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB)); + MRClientProtocol client = viewOnlyUser.doAs( + new PrivilegedExceptionAction() { + @Override + public MRClientProtocol run() throws Exception { + YarnRPC rpc = YarnRPC.create(conf); + return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, + app.clientService.getBindAddress(), conf); + } + }); + + KillJobRequest killJobRequest = recordFactory.newRecordInstance( + KillJobRequest.class); + killJobRequest.setJobId(app.getJobId()); + try { + client.killJob(killJobRequest); + fail("viewonlyuser killed job"); + } catch (AccessControlException e) { + // pass + } + + KillTaskRequest killTaskRequest = recordFactory.newRecordInstance( + KillTaskRequest.class); + killTaskRequest.setTaskId(task.getID()); + try { + client.killTask(killTaskRequest); + fail("viewonlyuser killed task"); + } catch (AccessControlException e) { + // pass + } + + KillTaskAttemptRequest killTaskAttemptRequest = + recordFactory.newRecordInstance(KillTaskAttemptRequest.class); + killTaskAttemptRequest.setTaskAttemptId(attempt.getID()); + try { + client.killTaskAttempt(killTaskAttemptRequest); + fail("viewonlyuser killed task attempt"); + } catch (AccessControlException e) { + // pass + } + + FailTaskAttemptRequest failTaskAttemptRequest = + recordFactory.newRecordInstance(FailTaskAttemptRequest.class); + failTaskAttemptRequest.setTaskAttemptId(attempt.getID()); + try { + 
client.failTaskAttempt(failTaskAttemptRequest); + fail("viewonlyuser killed task attempt"); + } catch (AccessControlException e) { + // pass + } + } + private void verifyJobReport(JobReport jr) { Assert.assertNotNull("JobReport is null", jr); List amInfos = jr.getAMInfos(); From c660339c093ee96e90e3b3778177a499e3b36404 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Sat, 24 Aug 2013 01:15:37 +0000 Subject: [PATCH 067/153] Revert MAPREDUCE-5475 and YARN-707 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517097 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 - .../v2/app/client/MRClientService.java | 64 +++++--------- .../mapreduce/v2/app/TestMRClientService.java | 85 ------------------- hadoop-yarn-project/CHANGES.txt | 2 - .../client/ClientToAMTokenIdentifier.java | 15 +--- .../rmapp/attempt/RMAppAttemptImpl.java | 2 +- .../recovery/TestRMStateStore.java | 2 +- .../security/TestClientToAMTokens.java | 68 +++++---------- 8 files changed, 46 insertions(+), 194 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 387952af62f..5769d926e67 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -243,8 +243,6 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5476. Changed MR AM recovery code to cleanup staging-directory only after unregistering from the RM. (Jian He via vinodkv) - MAPREDUCE-5475. MRClientService does not verify ACLs properly (jlowe) - Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index d36bf62fdf0..4bb39696e1e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -28,7 +28,6 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; @@ -79,8 +78,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider; import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp; import org.apache.hadoop.net.NetUtils; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -178,22 +175,16 @@ public class MRClientService extends AbstractService return getBindAddress(); } - private Job verifyAndGetJob(JobId jobID, - JobACL accessType) throws IOException { + private Job verifyAndGetJob(JobId jobID, + boolean modifyAccess) throws IOException { Job job = appContext.getJob(jobID); - UserGroupInformation ugi = 
UserGroupInformation.getCurrentUser(); - if (!job.checkAccess(ugi, accessType)) { - throw new AccessControlException("User " + ugi.getShortUserName() - + " cannot perform operation " + accessType.name() + " on " - + jobID); - } return job; } private Task verifyAndGetTask(TaskId taskID, - JobACL accessType) throws IOException { + boolean modifyAccess) throws IOException { Task task = verifyAndGetJob(taskID.getJobId(), - accessType).getTask(taskID); + modifyAccess).getTask(taskID); if (task == null) { throw new IOException("Unknown Task " + taskID); } @@ -201,9 +192,9 @@ public class MRClientService extends AbstractService } private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID, - JobACL accessType) throws IOException { + boolean modifyAccess) throws IOException { TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(), - accessType).getAttempt(attemptID); + modifyAccess).getAttempt(attemptID); if (attempt == null) { throw new IOException("Unknown TaskAttempt " + attemptID); } @@ -214,7 +205,7 @@ public class MRClientService extends AbstractService public GetCountersResponse getCounters(GetCountersRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); + Job job = verifyAndGetJob(jobId, false); GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class); response.setCounters(TypeConverter.toYarn(job.getAllCounters())); @@ -225,7 +216,7 @@ public class MRClientService extends AbstractService public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); + Job job = verifyAndGetJob(jobId, false); GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class); if (job != null) { @@ -244,7 +235,7 @@ public class MRClientService extends AbstractService GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class); response.setTaskAttemptReport( - verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getReport()); + verifyAndGetAttempt(taskAttemptId, false).getReport()); return response; } @@ -254,8 +245,7 @@ public class MRClientService extends AbstractService TaskId taskId = request.getTaskId(); GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class); - response.setTaskReport( - verifyAndGetTask(taskId, JobACL.VIEW_JOB).getReport()); + response.setTaskReport(verifyAndGetTask(taskId, false).getReport()); return response; } @@ -266,7 +256,7 @@ public class MRClientService extends AbstractService JobId jobId = request.getJobId(); int fromEventId = request.getFromEventId(); int maxEvents = request.getMaxEvents(); - Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); + Job job = verifyAndGetJob(jobId, false); GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class); @@ -280,11 +270,9 @@ public class MRClientService extends AbstractService public KillJobResponse killJob(KillJobRequest request) throws IOException { JobId jobId = request.getJobId(); - UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); - String message = "Kill job " + jobId + " received from " + callerUGI - + " at " + Server.getRemoteAddress(); + String message = "Kill Job received from client " + jobId; LOG.info(message); - verifyAndGetJob(jobId, JobACL.MODIFY_JOB); + verifyAndGetJob(jobId, true); 
appContext.getEventHandler().handle( new JobDiagnosticsUpdateEvent(jobId, message)); appContext.getEventHandler().handle( @@ -299,11 +287,9 @@ public class MRClientService extends AbstractService public KillTaskResponse killTask(KillTaskRequest request) throws IOException { TaskId taskId = request.getTaskId(); - UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); - String message = "Kill task " + taskId + " received from " + callerUGI - + " at " + Server.getRemoteAddress(); + String message = "Kill task received from client " + taskId; LOG.info(message); - verifyAndGetTask(taskId, JobACL.MODIFY_JOB); + verifyAndGetTask(taskId, true); appContext.getEventHandler().handle( new TaskEvent(taskId, TaskEventType.T_KILL)); KillTaskResponse response = @@ -316,12 +302,9 @@ public class MRClientService extends AbstractService public KillTaskAttemptResponse killTaskAttempt( KillTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); - String message = "Kill task attempt " + taskAttemptId - + " received from " + callerUGI + " at " - + Server.getRemoteAddress(); + String message = "Kill task attempt received from client " + taskAttemptId; LOG.info(message); - verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); + verifyAndGetAttempt(taskAttemptId, true); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -339,8 +322,8 @@ public class MRClientService extends AbstractService GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class); - response.addAllDiagnostics(verifyAndGetAttempt(taskAttemptId, - JobACL.VIEW_JOB).getDiagnostics()); + response.addAllDiagnostics( + verifyAndGetAttempt(taskAttemptId, false).getDiagnostics()); return response; } @@ -349,12 +332,9 @@ public class MRClientService extends AbstractService public FailTaskAttemptResponse failTaskAttempt( FailTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); - String message = "Fail task attempt " + taskAttemptId - + " received from " + callerUGI + " at " - + Server.getRemoteAddress(); + String message = "Fail task attempt received from client " + taskAttemptId; LOG.info(message); - verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); + verifyAndGetAttempt(taskAttemptId, true); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -376,7 +356,7 @@ public class MRClientService extends AbstractService GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class); - Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); + Job job = verifyAndGetJob(jobId, false); Collection tasks = job.getTasks(taskType).values(); LOG.info("Getting task report for " + taskType + " " + jobId + ". 
Report-size will be " + tasks.size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java index b17b8ce7adc..34b8dc76354 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java @@ -18,20 +18,13 @@ package org.apache.hadoop.mapreduce.v2.app; -import static org.junit.Assert.fail; - -import java.security.PrivilegedExceptionAction; import java.util.Iterator; import java.util.List; import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.mapreduce.JobACL; -import org.apache.hadoop.mapreduce.MRConfig; -import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; @@ -39,9 +32,6 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompleti import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest; -import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; @@ -61,8 +51,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -181,79 +169,6 @@ public class TestMRClientService { app.waitForState(job, JobState.SUCCEEDED); } - @Test - public void testViewAclOnlyCannotModify() throws Exception { - final MRAppWithClientService app = new MRAppWithClientService(1, 0, false); - final Configuration conf = new Configuration(); - conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); - conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser"); - Job job = app.submit(conf); - app.waitForState(job, JobState.RUNNING); - Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size()); - Iterator it = job.getTasks().values().iterator(); - Task task = it.next(); - app.waitForState(task, 
TaskState.RUNNING); - TaskAttempt attempt = task.getAttempts().values().iterator().next(); - app.waitForState(attempt, TaskAttemptState.RUNNING); - - UserGroupInformation viewOnlyUser = - UserGroupInformation.createUserForTesting( - "viewonlyuser", new String[] {}); - Assert.assertTrue("viewonlyuser cannot view job", - job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB)); - Assert.assertFalse("viewonlyuser can modify job", - job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB)); - MRClientProtocol client = viewOnlyUser.doAs( - new PrivilegedExceptionAction() { - @Override - public MRClientProtocol run() throws Exception { - YarnRPC rpc = YarnRPC.create(conf); - return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, - app.clientService.getBindAddress(), conf); - } - }); - - KillJobRequest killJobRequest = recordFactory.newRecordInstance( - KillJobRequest.class); - killJobRequest.setJobId(app.getJobId()); - try { - client.killJob(killJobRequest); - fail("viewonlyuser killed job"); - } catch (AccessControlException e) { - // pass - } - - KillTaskRequest killTaskRequest = recordFactory.newRecordInstance( - KillTaskRequest.class); - killTaskRequest.setTaskId(task.getID()); - try { - client.killTask(killTaskRequest); - fail("viewonlyuser killed task"); - } catch (AccessControlException e) { - // pass - } - - KillTaskAttemptRequest killTaskAttemptRequest = - recordFactory.newRecordInstance(KillTaskAttemptRequest.class); - killTaskAttemptRequest.setTaskAttemptId(attempt.getID()); - try { - client.killTaskAttempt(killTaskAttemptRequest); - fail("viewonlyuser killed task attempt"); - } catch (AccessControlException e) { - // pass - } - - FailTaskAttemptRequest failTaskAttemptRequest = - recordFactory.newRecordInstance(FailTaskAttemptRequest.class); - failTaskAttemptRequest.setTaskAttemptId(attempt.getID()); - try { - client.failTaskAttempt(failTaskAttemptRequest); - fail("viewonlyuser killed task attempt"); - } catch (AccessControlException e) { - // pass - } - } - private void verifyJobReport(JobReport jr) { Assert.assertNotNull("JobReport is null", jr); List amInfos = jr.getAMInfos(); diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index eba17467d78..91463c89798 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -45,8 +45,6 @@ Release 2.1.1-beta - UNRELEASED YARN-589. Expose a REST API for monitoring the fair scheduler (Sandy Ryza). - YARN-707. Add user info in the YARN ClientToken (vinodkv via jlowe) - OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java index 22497386601..d9c576eead3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java @@ -39,7 +39,6 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { public static final Text KIND_NAME = new Text("YARN_CLIENT_TOKEN"); private ApplicationAttemptId applicationAttemptId; - private Text applicationSubmitter = new Text(); // TODO: Add more information in the tokenID such that it is not // transferrable, more secure etc. 
@@ -47,27 +46,21 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { public ClientToAMTokenIdentifier() { } - public ClientToAMTokenIdentifier(ApplicationAttemptId id, String appSubmitter) { + public ClientToAMTokenIdentifier(ApplicationAttemptId id) { this(); this.applicationAttemptId = id; - this.applicationSubmitter = new Text(appSubmitter); } public ApplicationAttemptId getApplicationAttemptID() { return this.applicationAttemptId; } - public String getApplicationSubmitter() { - return this.applicationSubmitter.toString(); - } - @Override public void write(DataOutput out) throws IOException { out.writeLong(this.applicationAttemptId.getApplicationId() .getClusterTimestamp()); out.writeInt(this.applicationAttemptId.getApplicationId().getId()); out.writeInt(this.applicationAttemptId.getAttemptId()); - this.applicationSubmitter.write(out); } @Override @@ -75,7 +68,6 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { this.applicationAttemptId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(in.readLong(), in.readInt()), in.readInt()); - this.applicationSubmitter.readFields(in); } @Override @@ -85,11 +77,10 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { @Override public UserGroupInformation getUser() { - if (this.applicationSubmitter == null) { + if (this.applicationAttemptId == null) { return null; } - return UserGroupInformation.createRemoteUser(this.applicationSubmitter - .toString()); + return UserGroupInformation.createRemoteUser(this.applicationAttemptId.toString()); } @InterfaceAudience.Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 048002456d0..1543110db03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -722,7 +722,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { // create clientToAMToken appAttempt.clientToAMToken = new Token(new ClientToAMTokenIdentifier( - appAttempt.applicationAttemptId, appAttempt.user), + appAttempt.applicationAttemptId), appAttempt.rmContext.getClientToAMTokenSecretManager()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java index 98319522ff8..05916129e3b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java @@ -367,7 +367,7 @@ public class TestRMStateStore { 
appToken.setService(new Text("appToken service")); ClientToAMTokenIdentifier clientToAMTokenId = - new ClientToAMTokenIdentifier(attemptId, "user"); + new ClientToAMTokenIdentifier(attemptId); clientToAMTokenMgr.registerApplication(attemptId); Token clientToAMToken = new Token(clientToAMTokenId, clientToAMTokenMgr); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java index 6f68804fdcf..fc2fda85202 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java @@ -115,6 +115,7 @@ public class TestClientToAMTokens { private final byte[] secretKey; private InetSocketAddress address; private boolean pinged = false; + private ClientToAMTokenSecretManager secretManager; public CustomAM(ApplicationAttemptId appId, byte[] secretKey) { super("CustomAM"); @@ -131,14 +132,12 @@ public class TestClientToAMTokens { protected void serviceStart() throws Exception { Configuration conf = getConfig(); + secretManager = new ClientToAMTokenSecretManager(this.appAttemptId, secretKey); Server server; try { server = - new RPC.Builder(conf) - .setProtocol(CustomProtocol.class) - .setNumHandlers(1) - .setSecretManager( - new ClientToAMTokenSecretManager(this.appAttemptId, secretKey)) + new RPC.Builder(conf).setProtocol(CustomProtocol.class) + .setNumHandlers(1).setSecretManager(secretManager) .setInstance(this).build(); } catch (Exception e) { throw new YarnRuntimeException(e); @@ -147,10 +146,14 @@ public class TestClientToAMTokens { this.address = NetUtils.getConnectAddress(server); super.serviceStart(); } + + public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() { + return this.secretManager; + } } @Test - public void testClientToAMTokenss() throws Exception { + public void testClientToAMs() throws Exception { final Configuration conf = new Configuration(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, @@ -201,7 +204,7 @@ public class TestClientToAMTokens { GetApplicationReportResponse reportResponse = rm.getClientRMService().getApplicationReport(request); ApplicationReport appReport = reportResponse.getApplicationReport(); - org.apache.hadoop.yarn.api.records.Token originalClientToAMToken = + org.apache.hadoop.yarn.api.records.Token clientToAMToken = appReport.getClientToAMToken(); ApplicationAttemptId appAttempt = app.getCurrentAppAttempt().getAppAttemptId(); @@ -256,47 +259,17 @@ public class TestClientToAMTokens { Assert.assertFalse(am.pinged); } - Token token = - ConverterUtils.convertFromYarn(originalClientToAMToken, am.address); - - // Verify denial for a malicious user with tampered ID - verifyTokenWithTamperedID(conf, am, token); - - // Verify denial for a malicious user with tampered user-name - verifyTokenWithTamperedUserName(conf, am, token); - - // Now for an authenticated user - verifyValidToken(conf, am, token); - } - - private void verifyTokenWithTamperedID(final Configuration conf, - final CustomAM am, Token token) - throws 
IOException { - // Malicious user, messes with appId + // Verify denial for a malicious user UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); + Token token = + ConverterUtils.convertFromYarn(clientToAMToken, am.address); + + // Malicious user, messes with appId ClientToAMTokenIdentifier maliciousID = new ClientToAMTokenIdentifier(BuilderUtils.newApplicationAttemptId( - BuilderUtils.newApplicationId(am.appAttemptId.getApplicationId() - .getClusterTimestamp(), 42), 43), UserGroupInformation - .getCurrentUser().getShortUserName()); + BuilderUtils.newApplicationId(app.getApplicationId() + .getClusterTimestamp(), 42), 43)); - verifyTamperedToken(conf, am, token, ugi, maliciousID); - } - - private void verifyTokenWithTamperedUserName(final Configuration conf, - final CustomAM am, Token token) - throws IOException { - // Malicious user, messes with appId - UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); - ClientToAMTokenIdentifier maliciousID = - new ClientToAMTokenIdentifier(am.appAttemptId, "evilOrc"); - - verifyTamperedToken(conf, am, token, ugi, maliciousID); - } - - private void verifyTamperedToken(final Configuration conf, final CustomAM am, - Token token, UserGroupInformation ugi, - ClientToAMTokenIdentifier maliciousID) { Token maliciousToken = new Token(maliciousID.getBytes(), token.getPassword(), token.getKind(), @@ -336,12 +309,8 @@ public class TestClientToAMTokens { + "Mismatched response.")); Assert.assertFalse(am.pinged); } - } - private void verifyValidToken(final Configuration conf, final CustomAM am, - Token token) throws IOException, - InterruptedException { - UserGroupInformation ugi; + // Now for an authenticated user ugi = UserGroupInformation.createRemoteUser("me"); ugi.addToken(token); @@ -357,4 +326,5 @@ public class TestClientToAMTokens { } }); } + } From e86036662c139cd7e67e69a0215471b1ec724a05 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sat, 24 Aug 2013 02:47:47 +0000 Subject: [PATCH 068/153] YARN-1085. Modified YARN and MR2 web-apps to do HTTP authentication in secure setup with kerberos. Contributed by Omkar Vinit Joshi. 
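A minimal sketch (not part of the patch) of how a YARN daemon is expected to use the builder hooks introduced below: it hands the WebApps builder the configuration keys that name its SPNEGO principal and keytab, mirroring the NodeManager wiring later in this change. The class name, the bind address and the context/web-app objects are illustrative placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;
    import org.apache.hadoop.yarn.server.nodemanager.Context;
    import org.apache.hadoop.yarn.webapp.WebApp;
    import org.apache.hadoop.yarn.webapp.WebApps;

    public class SpnegoWebAppSketch {
      // nmContext and nmWebApp stand in for the daemon's own context and web-app.
      static WebApp startSecuredWebApp(Configuration conf, Context nmContext,
          WebApp nmWebApp) {
        return WebApps
            .$for("node", Context.class, nmContext, "ws")
            .with(conf)
            // Keys added by this patch; when security is enabled the embedded
            // HttpServer initializes its SPNEGO filter from the principal and
            // keytab these keys point to.
            .withHttpSpnegoPrincipalKey(
                YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY)
            .withHttpSpnegoKeytabKey(
                YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY)
            .at("0.0.0.0:8042")  // placeholder bind address
            .start(nmWebApp);
      }
    }

If either key is left unset, the WebApps change below logs a warning and skips the SPNEGO filter rather than failing the daemon.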
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517101 13f79535-47bb-0310-9956-ffa450edef68 --- .../mapreduce/v2/hs/HistoryClientService.java | 11 ++++- hadoop-yarn-project/CHANGES.txt | 3 ++ .../hadoop/yarn/conf/YarnConfiguration.java | 20 ++++++++- .../apache/hadoop/yarn/webapp/WebApps.java | 41 +++++++++++++++++-- .../server/nodemanager/webapp/WebServer.java | 11 ++++- .../resourcemanager/ResourceManager.java | 14 +++++-- 6 files changed, 89 insertions(+), 11 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index d3349d25065..2f0f2c2c2a5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -80,6 +80,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -148,8 +149,14 @@ public class HistoryClientService extends AbstractService { JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT); // NOTE: there should be a .at(InetSocketAddress) - WebApps.$for("jobhistory", HistoryClientService.class, this, "ws") - .with(conf).at(NetUtils.getHostPortString(bindAddress)).start(webApp); + WebApps + .$for("jobhistory", HistoryClientService.class, this, "ws") + .with(conf) + .withHttpSpnegoKeytabKey( + YarnConfiguration.JHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) + .withHttpSpnegoPrincipalKey( + YarnConfiguration.JHS_WEBAPP_SPNEGO_USER_NAME_KEY) + .at(NetUtils.getHostPortString(bindAddress)).start(webApp); conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, webApp.getListenerAddress()); } diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 91463c89798..db62f03d1cf 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -87,6 +87,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1082. Create base directories on HDFS after RM login to ensure RM recovery doesn't fail in secure mode. (vinodkv via acmurthy) + YARN-1085. Modified YARN and MR2 web-apps to do HTTP authentication in + secure setup with kerberos. 
(Omkar Vinit Joshi via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index ec9eb19c4f7..f57091e4380 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -205,6 +205,12 @@ public class YarnConfiguration extends Configuration { public static final String RM_KEYTAB = RM_PREFIX + "keytab"; + public static final String RM_WEBAPP_SPNEGO_USER_NAME_KEY = + RM_PREFIX + "webapp.spnego-principal"; + + public static final String RM_WEBAPP_SPENGO_KEYTAB_FILE_KEY = + RM_PREFIX + "webapp.spengo-keytab-file"; + /** How long to wait until a container is considered dead.*/ public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = RM_PREFIX + "rm.container-allocation.expiry-interval-ms"; @@ -599,7 +605,13 @@ public class YarnConfiguration extends Configuration { public static final String NM_USER_HOME_DIR = NM_PREFIX + "user-home-dir"; - + + public static final String NM_WEBAPP_SPNEGO_USER_NAME_KEY = + NM_PREFIX + "webapp.spnego-principal"; + + public static final String NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = + NM_PREFIX + "webapp.spnego-keytab-file"; + public static final String DEFAULT_NM_USER_HOME_DIR= "/home/"; //////////////////////////////// @@ -729,6 +741,12 @@ public class YarnConfiguration extends Configuration { // Other Configs //////////////////////////////// + public static final String JHS_WEBAPP_SPNEGO_USER_NAME_KEY = + "jobhistoryserver.webapp.spnego-principal"; + + public static final String JHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = + "jobhistoryserver.webapp.spnego-keytab-file"; + /** * The interval of the yarn client's querying application state after * application submission. The unit is millisecond. 
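As a rough illustration (not part of the patch) of feeding the new keys, the sketch below populates them on a Configuration programmatically; the principal and keytab values are deployment-specific placeholders.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class SpnegoConfSketch {
      static Configuration secureWebAppConf() {
        Configuration conf = new YarnConfiguration();
        // Placeholder principal and keytab; substitute the cluster's own values.
        conf.set(YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY,
            "HTTP/_HOST@EXAMPLE.COM");
        // Note: the RM keytab constant is introduced with the "SPENGO" spelling above.
        conf.set(YarnConfiguration.RM_WEBAPP_SPENGO_KEYTAB_FILE_KEY,
            "/etc/security/keytabs/spnego.service.keytab");
        conf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY,
            "HTTP/_HOST@EXAMPLE.COM");
        conf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
            "/etc/security/keytabs/spnego.service.keytab");
        return conf;
      }
    }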
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index b7bf6361251..87622c2b4f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -33,6 +33,8 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpServer; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.AdminACLsManager; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -65,7 +67,6 @@ import com.google.inject.servlet.GuiceFilter; @InterfaceAudience.LimitedPrivate({"YARN", "MapReduce"}) public class WebApps { static final Logger LOG = LoggerFactory.getLogger(WebApps.class); - public static class Builder { static class ServletStruct { public Class clazz; @@ -82,6 +83,8 @@ public class WebApps { boolean findPort = false; Configuration conf; boolean devMode = false; + private String spnegoPrincipalKey; + private String spnegoKeytabKey; private final HashSet servlets = new HashSet(); private final HashMap attributes = new HashMap(); @@ -135,6 +138,16 @@ public class WebApps { this.conf = conf; return this; } + + public Builder withHttpSpnegoPrincipalKey(String spnegoPrincipalKey) { + this.spnegoPrincipalKey = spnegoPrincipalKey; + return this; + } + + public Builder withHttpSpnegoKeytabKey(String spnegoKeytabKey) { + this.spnegoKeytabKey = spnegoKeytabKey; + return this; + } public Builder inDevMode() { devMode = true; @@ -197,8 +210,30 @@ public class WebApps { } } HttpServer server = - new HttpServer(name, bindAddress, port, findPort, conf, - new AdminACLsManager(conf).getAdminAcl(), null, webapp.getServePathSpecs()); + new HttpServer(name, bindAddress, port, findPort, conf, + new AdminACLsManager(conf).getAdminAcl(), null, + webapp.getServePathSpecs()) { + + { + if (UserGroupInformation.isSecurityEnabled()) { + boolean initSpnego = true; + if (spnegoPrincipalKey == null || spnegoPrincipalKey.isEmpty()) { + LOG.warn("Principal for spnego filter is not set"); + initSpnego = false; + } + if (spnegoKeytabKey == null || spnegoKeytabKey.isEmpty()) { + LOG.warn("Keytab for spnego filter is not set"); + initSpnego = false; + } + if (initSpnego) { + LOG.info("Initializing spnego filter with principal key : " + + spnegoPrincipalKey + " keytab key : " + + spnegoKeytabKey); + initSpnego(conf, spnegoPrincipalKey, spnegoKeytabKey); + } + } + } + }; for(ServletStruct struct: servlets) { server.addServlet(struct.name, struct.spec, struct.clazz); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java index 25dfaf6b529..fab88b9b004 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/WebServer.java @@ -59,8 +59,15 @@ public class WebServer extends AbstractService { LOG.info("Instantiating NMWebApp at " + bindAddress); try { this.webApp = - WebApps.$for("node", Context.class, this.nmContext, "ws") - .at(bindAddress).with(getConfig()).start(this.nmWebApp); + WebApps + .$for("node", Context.class, this.nmContext, "ws") + .at(bindAddress) + .with(getConfig()) + .withHttpSpnegoPrincipalKey( + YarnConfiguration.NM_WEBAPP_SPNEGO_USER_NAME_KEY) + .withHttpSpnegoKeytabKey( + YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) + .start(this.nmWebApp); this.port = this.webApp.httpServer().getPort(); } catch (Exception e) { String msg = "NMWebapps failed to start."; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index a4fb30f85cc..47d947d20f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.source.JvmMetrics; import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.CompositeService; import org.apache.hadoop.service.Service; @@ -573,9 +574,16 @@ public class ResourceManager extends CompositeService implements Recoverable { protected void startWepApp() { Builder builder = - WebApps.$for("cluster", ApplicationMasterService.class, masterService, "ws").at( - this.conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS)); + WebApps + .$for("cluster", ApplicationMasterService.class, masterService, + "ws") + .with(conf) + .withHttpSpnegoPrincipalKey( + YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY) + .withHttpSpnegoKeytabKey( + YarnConfiguration.RM_WEBAPP_SPENGO_KEYTAB_FILE_KEY) + .at(this.conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS)); String proxyHostAndPort = YarnConfiguration.getProxyHostAndPort(conf); if(YarnConfiguration.getRMWebAppHostAndPort(conf). equals(proxyHostAndPort)) { From 962da4dcc74d23c7ce78164dcde38ea5aaf3dd68 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sat, 24 Aug 2013 21:16:40 +0000 Subject: [PATCH 069/153] YARN-1074. Cleaned up YARN CLI application list to only display running applications by default. Contributed by Xuan Gong. 
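A minimal usage sketch (not part of the patch) of the state-based filtering this change adds; the YarnClient factory and service lifecycle calls are the standard client setup and are assumed here rather than shown in the diff. The CLI equivalents are roughly 'yarn application -list' for running applications and 'yarn application -list --appStates FINISHED,FAILED,KILLED' for completed ones.

    import java.util.EnumSet;
    import java.util.List;

    import org.apache.hadoop.yarn.api.records.ApplicationReport;
    import org.apache.hadoop.yarn.api.records.YarnApplicationState;
    import org.apache.hadoop.yarn.client.api.YarnClient;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class AppListingSketch {
      public static void main(String[] args) throws Exception {
        YarnClient client = YarnClient.createYarnClient();
        client.init(new YarnConfiguration());
        client.start();
        try {
          // The CLI's plain "yarn application -list" now defaults to RUNNING;
          // the API call below requests the same thing explicitly.
          List<ApplicationReport> running =
              client.getApplications(EnumSet.of(YarnApplicationState.RUNNING));
          // Completed work, matching "--appStates FINISHED,FAILED,KILLED".
          List<ApplicationReport> done = client.getApplications(
              EnumSet.of(YarnApplicationState.FINISHED,
                  YarnApplicationState.FAILED,
                  YarnApplicationState.KILLED));
          System.out.println(running.size() + " running, "
              + done.size() + " completed applications");
        } finally {
          client.stop();
        }
      }
    }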
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517196 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop/mapred/ResourceMgrDelegate.java | 26 +- hadoop-yarn-project/CHANGES.txt | 3 + .../GetApplicationsRequest.java | 79 +++++- .../src/main/proto/yarn_service_protos.proto | 1 + .../hadoop/yarn/client/api/YarnClient.java | 50 +++- .../yarn/client/api/impl/YarnClientImpl.java | 22 +- .../yarn/client/cli/ApplicationCLI.java | 86 +++++- .../yarn/client/api/impl/TestYarnClient.java | 93 +++++-- .../hadoop/yarn/client/cli/TestYarnCLI.java | 250 ++++++++++++++---- .../impl/pb/GetApplicationsRequestPBImpl.java | 69 +++++ .../resourcemanager/ClientRMService.java | 22 +- .../server/resourcemanager/RMServerUtils.java | 26 ++ .../resourcemanager/rmapp/RMAppImpl.java | 27 +- 13 files changed, 630 insertions(+), 124 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java index ba02d86bdef..74b07c2f3db 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java @@ -20,6 +20,7 @@ package org.apache.hadoop.mapred; import java.io.IOException; import java.net.InetSocketAddress; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -49,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.NodeState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.client.api.YarnClientApplication; @@ -118,8 +120,10 @@ public class ResourceMgrDelegate extends YarnClient { try { Set appTypes = new HashSet(1); appTypes.add(MRJobConfig.MR_APPLICATION_TYPE); + EnumSet appStates = + EnumSet.noneOf(YarnApplicationState.class); return TypeConverter.fromYarnApps( - client.getApplications(appTypes), this.conf); + client.getApplications(appTypes, appStates), this.conf); } catch (YarnException e) { throw new IOException(e); } @@ -299,11 +303,27 @@ public class ResourceMgrDelegate extends YarnClient { } @Override - public List getApplications( - Set applicationTypes) throws YarnException, IOException { + public List getApplications(Set applicationTypes) + throws YarnException, + IOException { return client.getApplications(applicationTypes); } + @Override + public List getApplications( + EnumSet applicationStates) throws YarnException, + IOException { + return client.getApplications(applicationStates); + } + + @Override + public List getApplications( + Set applicationTypes, + EnumSet applicationStates) + throws YarnException, IOException { + return client.getApplications(applicationTypes, applicationStates); + } + @Override public YarnClusterMetrics getYarnClusterMetrics() throws YarnException, IOException { diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index db62f03d1cf..f208581e627 100644 --- 
a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -44,6 +44,9 @@ Release 2.1.1-beta - UNRELEASED IMPROVEMENTS YARN-589. Expose a REST API for monitoring the fair scheduler (Sandy Ryza). + + YARN-1074. Cleaned up YARN CLI application list to only display running + applications by default. (Xuan Gong via vinodkv) OPTIMIZATIONS diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java index 9a732210aa1..e53bf88a4fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetApplicationsRequest.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.api.protocolrecords; +import java.util.EnumSet; import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -25,6 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.util.Records; /** @@ -45,16 +47,68 @@ public abstract class GetApplicationsRequest { return request; } + /** + *
<p>
+ * The request from clients to get a report of Applications matching the + * given application types in the cluster from the + * ResourceManager. + *
</p>
+ * + * + * @see ApplicationClientProtocol#getApplications(GetApplicationsRequest) + */ @Public @Stable - public static GetApplicationsRequest newInstance( - Set applicationTypes) { + public static GetApplicationsRequest + newInstance(Set applicationTypes) { GetApplicationsRequest request = Records.newRecord(GetApplicationsRequest.class); request.setApplicationTypes(applicationTypes); return request; } + /** + *
<p>
+ * The request from clients to get a report of Applications matching the + * given application states in the cluster from the + * ResourceManager. + *
</p>
+ * + * + * @see ApplicationClientProtocol#getApplications(GetApplicationsRequest) + */ + @Public + @Stable + public static GetApplicationsRequest newInstance( + EnumSet applicationStates) { + GetApplicationsRequest request = + Records.newRecord(GetApplicationsRequest.class); + request.setApplicationStates(applicationStates); + return request; + } + + /** + *
<p>
+ * The request from clients to get a report of Applications matching the + * given application types and application states in the cluster from the + * ResourceManager. + *
</p>
+ * + * + * @see ApplicationClientProtocol#getApplications(GetApplicationsRequest) + */ + @Public + @Stable + public static GetApplicationsRequest newInstance( + Set applicationTypes, + EnumSet applicationStates) { + GetApplicationsRequest request = + Records.newRecord(GetApplicationsRequest.class); + request.setApplicationTypes(applicationTypes); + request.setApplicationStates(applicationStates); + return request; + } + /** * Get the application types to filter applications on * @@ -75,4 +129,25 @@ public abstract class GetApplicationsRequest { @Unstable public abstract void setApplicationTypes(Set applicationTypes); + + /** + * Get the application states to filter applications on + * + * @return Set of Application states to filter on + */ + @Public + @Stable + public abstract EnumSet getApplicationStates(); + + /** + * Set the application states to filter applications on + * + * @param applicationStates + * A Set of Application states to filter on. + * If not defined, match all running applications + */ + @Private + @Unstable + public abstract void + setApplicationStates(EnumSet applicationStates); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto index bd009e0d4a6..7b3d0cf77cd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto @@ -122,6 +122,7 @@ message GetClusterMetricsResponseProto { message GetApplicationsRequestProto { repeated string application_types = 1; + repeated YarnApplicationStateProto application_states = 2; } message GetApplicationsResponseProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java index c3587b29170..155ba5d51a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/YarnClient.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.client.api; import java.io.IOException; +import java.util.EnumSet; import java.util.List; import java.util.Set; @@ -36,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Token; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.client.api.impl.YarnClientImpl; import org.apache.hadoop.yarn.exceptions.YarnException; @@ -171,13 +173,13 @@ public abstract class YarnClient extends AbstractService { *
<p>
* Get a report (ApplicationReport) of all Applications in the cluster. *
</p>
- * + * *
<p>
* If the user does not have VIEW_APP access for an application * then the corresponding report will be filtered as described in * {@link #getApplicationReport(ApplicationId)}. *
</p>
- * + * * @return a list of reports of all running applications * @throws YarnException * @throws IOException @@ -205,6 +207,50 @@ public abstract class YarnClient extends AbstractService { public abstract List getApplications( Set applicationTypes) throws YarnException, IOException; + /** + *
<p>
+ * Get a report (ApplicationReport) of Applications matching the given + * application states in the cluster. + *
</p>
+ * + *
<p>
+ * If the user does not have VIEW_APP access for an application + * then the corresponding report will be filtered as described in + * {@link #getApplicationReport(ApplicationId)}. + *
</p>
+ * + * @param applicationStates + * @return a list of reports of applications + * @throws YarnException + * @throws IOException + */ + public abstract List + getApplications(EnumSet applicationStates) + throws YarnException, IOException; + + /** + *
<p>
+ * Get a report (ApplicationReport) of Applications matching the given + * application types and application states in the cluster. + *
</p>
+ * + *
<p>
+ * If the user does not have VIEW_APP access for an application + * then the corresponding report will be filtered as described in + * {@link #getApplicationReport(ApplicationId)}. + *
</p>
+ * + * @param applicationTypes + * @param applicationStates + * @return a list of reports of applications + * @throws YarnException + * @throws IOException + */ + public abstract List getApplications( + Set applicationTypes, + EnumSet applicationStates) throws YarnException, + IOException; + /** *
<p>
* Get metrics ({@link YarnClusterMetrics}) about the cluster. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java index 4a1b83ca9e9..d35e1a4300d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/YarnClientImpl.java @@ -211,15 +211,29 @@ public class YarnClientImpl extends YarnClient { @Override public List getApplications() throws YarnException, IOException { - return getApplications(null); + return getApplications(null, null); + } + + @Override + public List getApplications(Set applicationTypes) + throws YarnException, + IOException { + return getApplications(applicationTypes, null); } @Override public List getApplications( - Set applicationTypes) throws YarnException, IOException { + EnumSet applicationStates) + throws YarnException, IOException { + return getApplications(null, applicationStates); + } + + @Override + public List getApplications(Set applicationTypes, + EnumSet applicationStates) throws YarnException, + IOException { GetApplicationsRequest request = - applicationTypes == null ? GetApplicationsRequest.newInstance() - : GetApplicationsRequest.newInstance(applicationTypes); + GetApplicationsRequest.newInstance(applicationTypes, applicationStates); GetApplicationsResponse response = rmClient.getApplications(request); return response.getApplicationList(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index 16e55a6a72d..69de37a76c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -21,6 +21,7 @@ import java.io.ByteArrayOutputStream; import java.io.IOException; import java.io.PrintWriter; import java.text.DecimalFormat; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -49,6 +50,10 @@ public class ApplicationCLI extends YarnCLI { System.getProperty("line.separator"); private static final String APP_TYPE_CMD = "appTypes"; + private static final String APP_STATE_CMD ="appStates"; + private static final String ALLSTATES_OPTION = "ALL"; + + private boolean allAppStates; public static void main(String[] args) throws Exception { ApplicationCLI cli = new ApplicationCLI(); @@ -66,7 +71,8 @@ public class ApplicationCLI extends YarnCLI { opts.addOption(STATUS_CMD, true, "Prints the status of the application."); opts.addOption(LIST_CMD, false, "List applications from the RM. 
" + "Supports optional use of --appTypes to filter applications " + - "based on application type."); + "based on application type, " + + "and --appStates to filter applications based on application state"); opts.addOption(KILL_CMD, true, "Kills the application."); opts.addOption(HELP_CMD, false, "Displays help for all commands."); Option appTypeOpt = new Option(APP_TYPE_CMD, true, @@ -75,6 +81,16 @@ public class ApplicationCLI extends YarnCLI { appTypeOpt.setArgs(Option.UNLIMITED_VALUES); appTypeOpt.setArgName("Comma-separated list of application types"); opts.addOption(appTypeOpt); + Option appStateOpt = + new Option( + APP_STATE_CMD, + true, + "Works with --list to filter applications based on their state. " + + getAllValidApplicationStates()); + appStateOpt.setValueSeparator(','); + appStateOpt.setArgs(Option.UNLIMITED_VALUES); + appStateOpt.setArgName("Comma-separated list of application states"); + opts.addOption(appStateOpt); opts.getOption(KILL_CMD).setArgName("Application ID"); opts.getOption(STATUS_CMD).setArgName("Application ID"); CommandLine cliParser = new GnuParser().parse(opts, args); @@ -87,18 +103,44 @@ public class ApplicationCLI extends YarnCLI { } printApplicationReport(cliParser.getOptionValue(STATUS_CMD)); } else if (cliParser.hasOption(LIST_CMD)) { + allAppStates = false; Set appTypes = new HashSet(); if(cliParser.hasOption(APP_TYPE_CMD)) { String[] types = cliParser.getOptionValues(APP_TYPE_CMD); if (types != null) { for (String type : types) { if (!type.trim().isEmpty()) { - appTypes.add(type.trim()); + appTypes.add(type.toUpperCase().trim()); } } } } - listApplications(appTypes); + + EnumSet appStates = + EnumSet.noneOf(YarnApplicationState.class); + if (cliParser.hasOption(APP_STATE_CMD)) { + String[] states = cliParser.getOptionValues(APP_STATE_CMD); + if (states != null) { + for (String state : states) { + if (!state.trim().isEmpty()) { + if (state.trim().equalsIgnoreCase(ALLSTATES_OPTION)) { + allAppStates = true; + break; + } + try { + appStates.add(YarnApplicationState.valueOf(state.toUpperCase() + .trim())); + } catch (IllegalArgumentException ex) { + sysout.println("The application state " + state + + " is invalid."); + sysout.println(getAllValidApplicationStates()); + return exitCode; + } + } + } + } + } + listApplications(appTypes, appStates); } else if (cliParser.hasOption(KILL_CMD)) { if (args.length != 2) { printUsage(opts); @@ -127,19 +169,33 @@ public class ApplicationCLI extends YarnCLI { /** * Lists the applications matching the given application Types - * present in the Resource Manager + * And application States present in the Resource Manager * * @param appTypes + * @param appStates * @throws YarnException * @throws IOException */ - private void listApplications(Set appTypes) - throws YarnException, IOException { + private void listApplications(Set appTypes, + EnumSet appStates) throws YarnException, + IOException { PrintWriter writer = new PrintWriter(sysout); - List appsReport = - client.getApplications(appTypes); + if (allAppStates) { + for(YarnApplicationState appState : YarnApplicationState.values()) { + appStates.add(appState); + } + } else { + if (appStates.isEmpty()) { + appStates.add(YarnApplicationState.RUNNING); + } + } - writer.println("Total Applications:" + appsReport.size()); + List appsReport = + client.getApplications(appTypes, appStates); + + writer + .println("Total number of applications (application-types: " + appTypes + + " and states: " + appStates + ")" + ":" + appsReport.size()); writer.printf(APPLICATIONS_PATTERN, 
"Application-Id", "Application-Name","Application-Type", "User", "Queue", "State", "Final-State","Progress", "Tracking-URL"); @@ -229,4 +285,16 @@ public class ApplicationCLI extends YarnCLI { sysout.println(baos.toString("UTF-8")); } + private String getAllValidApplicationStates() { + StringBuilder sb = new StringBuilder(); + sb.append("The valid application state can be" + + " one of the following: "); + sb.append(ALLSTATES_OPTION + ","); + for (YarnApplicationState appState : YarnApplicationState + .values()) { + sb.append(appState+","); + } + String output = sb.toString(); + return output.substring(0, output.length()-1); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 8df430aa8d3..e7a66bd28ef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -27,6 +27,7 @@ import static org.mockito.Mockito.when; import java.io.IOException; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; +import java.util.EnumSet; import java.util.HashMap; import java.util.HashSet; import java.util.List; @@ -55,10 +56,12 @@ import org.apache.hadoop.yarn.client.api.YarnClient; import org.apache.hadoop.yarn.client.api.YarnClientApplication; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.MiniYARNCluster; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Level; import org.apache.log4j.LogManager; @@ -163,11 +166,15 @@ public class TestYarnClient { List expectedReports = ((MockYarnClient)client).getReports(); + List reports = client.getApplications(); + Assert.assertEquals(reports, expectedReports); + Set appTypes = new HashSet(); appTypes.add("YARN"); appTypes.add("NON-YARN"); - List reports = client.getApplications(appTypes); + reports = + client.getApplications(appTypes, null); Assert.assertEquals(reports.size(), 2); Assert .assertTrue((reports.get(0).getApplicationType().equals("YARN") && reports @@ -178,8 +185,28 @@ public class TestYarnClient { Assert.assertTrue(expectedReports.contains(report)); } - reports = client.getApplications(); - Assert.assertEquals(reports, expectedReports); + EnumSet appStates = + EnumSet.noneOf(YarnApplicationState.class); + appStates.add(YarnApplicationState.FINISHED); + appStates.add(YarnApplicationState.FAILED); + reports = client.getApplications(null, appStates); + Assert.assertEquals(reports.size(), 2); + Assert + .assertTrue((reports.get(0).getApplicationType().equals("NON-YARN") && reports + .get(1).getApplicationType().equals("NON-MAPREDUCE")) + || (reports.get(1).getApplicationType().equals("NON-YARN") && reports + .get(0).getApplicationType().equals("NON-MAPREDUCE"))); + for (ApplicationReport report : reports) { + Assert.assertTrue(expectedReports.contains(report)); + } + 
+ reports = client.getApplications(appTypes, appStates); + Assert.assertEquals(reports.size(), 1); + Assert + .assertTrue((reports.get(0).getApplicationType().equals("NON-YARN"))); + for (ApplicationReport report : reports) { + Assert.assertTrue(expectedReports.contains(report)); + } client.stop(); } @@ -187,6 +214,8 @@ public class TestYarnClient { private static class MockYarnClient extends YarnClientImpl { private ApplicationReport mockReport; private List reports; + GetApplicationsResponse mockAppResponse = + mock(GetApplicationsResponse.class); public MockYarnClient() { super(); @@ -202,6 +231,8 @@ public class TestYarnClient { try{ when(rmClient.getApplicationReport(any( GetApplicationReportRequest.class))).thenReturn(mockResponse); + when(rmClient.getApplications(any(GetApplicationsRequest.class))) + .thenReturn(mockAppResponse); } catch (YarnException e) { Assert.fail("Exception is not expected."); } catch (IOException e) { @@ -212,16 +243,11 @@ public class TestYarnClient { @Override public List getApplications( - Set applicationTypes) throws YarnException, IOException { - GetApplicationsRequest request = - applicationTypes == null ? GetApplicationsRequest.newInstance() - : GetApplicationsRequest.newInstance(applicationTypes); - when(rmClient.getApplications(request)) - .thenReturn( - getApplicationReports(reports, - request)); - GetApplicationsResponse response = rmClient.getApplications(request); - return response.getApplicationList(); + Set applicationTypes, EnumSet applicationStates) + throws YarnException, IOException { + when(mockAppResponse.getApplicationList()).thenReturn( + getApplicationReports(reports, applicationTypes, applicationStates)); + return super.getApplications(applicationTypes, applicationStates); } @Override @@ -243,7 +269,7 @@ public class TestYarnClient { ApplicationReport newApplicationReport = ApplicationReport.newInstance( applicationId, ApplicationAttemptId.newInstance(applicationId, 1), "user", "queue", "appname", "host", 124, null, - YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, + YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); List applicationReports = new ArrayList(); @@ -262,31 +288,44 @@ public class TestYarnClient { ApplicationReport newApplicationReport3 = ApplicationReport.newInstance( applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3), "user3", "queue3", "appname3", "host3", 126, null, - YarnApplicationState.FINISHED, "diagnostics3", "url3", 3, 3, - FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", + YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, + FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", null); applicationReports.add(newApplicationReport3); + + ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8); + ApplicationReport newApplicationReport4 = + ApplicationReport.newInstance( + applicationId4, + ApplicationAttemptId.newInstance(applicationId4, 4), + "user4", "queue4", "appname4", "host4", 127, null, + YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, + FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f, + "NON-MAPREDUCE", null); + applicationReports.add(newApplicationReport4); return applicationReports; } - private GetApplicationsResponse getApplicationReports( + private List getApplicationReports( List applicationReports, - GetApplicationsRequest request) { + Set applicationTypes, EnumSet applicationStates) { List appReports = new ArrayList(); - 
Set appTypes = request.getApplicationTypes(); - boolean bypassFilter = appTypes.isEmpty(); - for (ApplicationReport appReport : applicationReports) { - if (!(bypassFilter || appTypes.contains( - appReport.getApplicationType()))) { - continue; + if (applicationTypes != null && !applicationTypes.isEmpty()) { + if (!applicationTypes.contains(appReport.getApplicationType())) { + continue; + } + } + + if (applicationStates != null && !applicationStates.isEmpty()) { + if (!applicationStates.contains(appReport.getYarnApplicationState())) { + continue; + } } appReports.add(appReport); } - GetApplicationsResponse response = - GetApplicationsResponse.newInstance(appReports); - return response; + return appReports; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index c33ddea38b7..6d53108f95a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -33,6 +33,7 @@ import java.io.PrintStream; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Date; +import java.util.EnumSet; import java.util.HashSet; import java.util.List; import java.util.Set; @@ -134,7 +135,7 @@ public class TestYarnCLI { ApplicationReport newApplicationReport = ApplicationReport.newInstance( applicationId, ApplicationAttemptId.newInstance(applicationId, 1), "user", "queue", "appname", "host", 124, null, - YarnApplicationState.FINISHED, "diagnostics", "url", 0, 0, + YarnApplicationState.RUNNING, "diagnostics", "url", 0, 0, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.53789f, "YARN", null); List applicationReports = new ArrayList(); applicationReports.add(newApplicationReport); @@ -152,23 +153,39 @@ public class TestYarnCLI { ApplicationReport newApplicationReport3 = ApplicationReport.newInstance( applicationId3, ApplicationAttemptId.newInstance(applicationId3, 3), "user3", "queue3", "appname3", "host3", 126, null, - YarnApplicationState.FINISHED, "diagnostics3", "url3", 3, 3, + YarnApplicationState.RUNNING, "diagnostics3", "url3", 3, 3, FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.73789f, "MAPREDUCE", null); applicationReports.add(newApplicationReport3); - Set appType1 = new HashSet(); - appType1.add("YARN"); + ApplicationId applicationId4 = ApplicationId.newInstance(1234, 8); + ApplicationReport newApplicationReport4 = ApplicationReport.newInstance( + applicationId4, ApplicationAttemptId.newInstance(applicationId4, 4), + "user4", "queue4", "appname4", "host4", 127, null, + YarnApplicationState.FAILED, "diagnostics4", "url4", 4, 4, + FinalApplicationStatus.SUCCEEDED, null, "N/A", 0.83789f, "NON-MAPREDUCE", + null); + applicationReports.add(newApplicationReport4); - when(client.getApplications(appType1)).thenReturn( - getApplicationReports(applicationReports, appType1)); - int result = cli.run(new String[] { "-list", "-appTypes", "YARN" }); + // Test command yarn application -list + // if the set appStates is empty, RUNNING state will be automatically added + // to the appStates list + // the output of yarn application -list should be the same as + // equals to yarn application -list --appStates RUNNING + Set appType1 = new HashSet(); + EnumSet appState1 = + EnumSet.noneOf(YarnApplicationState.class); + 
appState1.add(YarnApplicationState.RUNNING); + when(client.getApplications(appType1, appState1)).thenReturn( + getApplicationReports(applicationReports, appType1, appState1, false)); + int result = cli.run(new String[] { "-list" }); assertEquals(0, result); - verify(client).getApplications(appType1); + verify(client).getApplications(appType1, appState1); ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); - pw.println("Total Applications:1"); + pw.println("Total number of applications (application-types: " + appType1 + + " and states: " + appState1 + ")" + ":" + 2); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); @@ -176,27 +193,41 @@ public class TestYarnCLI { pw.println("\t Tracking-URL"); pw.print(" application_1234_0005\t "); pw.print("appname\t YARN\t user\t "); - pw.print("queue\t FINISHED\t "); + pw.print("queue\t RUNNING\t "); pw.print("SUCCEEDED\t 53.79%"); pw.println("\t N/A"); + pw.print(" application_1234_0007\t "); + pw.print("appname3\t MAPREDUCE\t user3\t "); + pw.print("queue3\t RUNNING\t "); + pw.print("SUCCEEDED\t 73.79%"); + pw.println("\t N/A"); pw.close(); String appsReportStr = baos.toString("UTF-8"); Assert.assertEquals(appsReportStr, sysOutStream.toString()); verify(sysOut, times(1)).write(any(byte[].class), anyInt(), anyInt()); + //Test command yarn application -list --appTypes apptype1,apptype2 + //the output should be the same as + //yarn application -list --appTypes apptyp1, apptype2 --appStates RUNNING sysOutStream.reset(); Set appType2 = new HashSet(); appType2.add("YARN"); - appType2.add("FOO-YARN"); - when(client.getApplications(appType2)).thenReturn( - getApplicationReports(applicationReports, appType2)); - cli.run(new String[] { "-list", "-appTypes", "YARN , ,, ,FOO-YARN", - ",,,,, YARN,," }); + appType2.add("NON-YARN"); + + EnumSet appState2 = + EnumSet.noneOf(YarnApplicationState.class); + appState2.add(YarnApplicationState.RUNNING); + when(client.getApplications(appType2, appState2)).thenReturn( + getApplicationReports(applicationReports, appType2, appState2, false)); + result = + cli.run(new String[] { "-list", "-appTypes", "YARN, ,, NON-YARN", + " ,, ,," }); assertEquals(0, result); - verify(client).getApplications(appType2); + verify(client).getApplications(appType2, appState2); baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); - pw.println("Total Applications:1"); + pw.println("Total number of applications (application-types: " + appType2 + + " and states: " + appState2 + ")" + ":" + 1); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); @@ -204,7 +235,7 @@ public class TestYarnCLI { pw.println("\t Tracking-URL"); pw.print(" application_1234_0005\t "); pw.print("appname\t YARN\t user\t "); - pw.print("queue\t FINISHED\t "); + pw.print("queue\t RUNNING\t "); pw.print("SUCCEEDED\t 53.79%"); pw.println("\t N/A"); pw.close(); @@ -212,29 +243,74 @@ public class TestYarnCLI { Assert.assertEquals(appsReportStr, sysOutStream.toString()); verify(sysOut, times(2)).write(any(byte[].class), anyInt(), anyInt()); + //Test command yarn application -list --appStates appState1,appState2 sysOutStream.reset(); Set appType3 = new HashSet(); - appType3.add("YARN"); - appType3.add("NON-YARN"); - when(client.getApplications(appType3)).thenReturn( - getApplicationReports(applicationReports, appType3)); - result = cli.run(new String[] { "-list", "-appTypes", "YARN,NON-YARN" 
}); + EnumSet appState3 = + EnumSet.noneOf(YarnApplicationState.class); + appState3.add(YarnApplicationState.FINISHED); + appState3.add(YarnApplicationState.FAILED); + + when(client.getApplications(appType3, appState3)).thenReturn( + getApplicationReports(applicationReports, appType3, appState3, false)); + result = + cli.run(new String[] { "-list", "--appStates", "FINISHED ,, , FAILED", + ",,FINISHED" }); assertEquals(0, result); - verify(client).getApplications(appType3); + verify(client).getApplications(appType3, appState3); baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); - pw.println("Total Applications:2"); + pw.println("Total number of applications (application-types: " + appType3 + + " and states: " + appState3 + ")" + ":" + 2); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); pw.print("Final-State\t Progress"); pw.println("\t Tracking-URL"); - pw.print(" application_1234_0005\t "); - pw.print("appname\t YARN\t user\t "); - pw.print("queue\t FINISHED\t "); - pw.print("SUCCEEDED\t 53.79%"); + pw.print(" application_1234_0006\t "); + pw.print("appname2\t NON-YARN\t user2\t "); + pw.print("queue2\t FINISHED\t "); + pw.print("SUCCEEDED\t 63.79%"); pw.println("\t N/A"); + pw.print(" application_1234_0008\t "); + pw.print("appname4\t NON-MAPREDUCE\t user4\t "); + pw.print("queue4\t FAILED\t "); + pw.print("SUCCEEDED\t 83.79%"); + pw.println("\t N/A"); + pw.close(); + appsReportStr = baos.toString("UTF-8"); + Assert.assertEquals(appsReportStr, sysOutStream.toString()); + verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt()); + + // Test command yarn application -list --appTypes apptype1,apptype2 + // --appStates appstate1,appstate2 + sysOutStream.reset(); + Set appType4 = new HashSet(); + appType4.add("YARN"); + appType4.add("NON-YARN"); + + EnumSet appState4 = + EnumSet.noneOf(YarnApplicationState.class); + appState4.add(YarnApplicationState.FINISHED); + appState4.add(YarnApplicationState.FAILED); + + when(client.getApplications(appType4, appState4)).thenReturn( + getApplicationReports(applicationReports, appType4, appState4, false)); + result = + cli.run(new String[] { "-list", "--appTypes", "YARN,NON-YARN", + "--appStates", "FINISHED ,, , FAILED" }); + assertEquals(0, result); + verify(client).getApplications(appType2, appState2); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total number of applications (application-types: " + appType4 + + " and states: " + appState4 + ")" + ":" + 1); + pw.print(" Application-Id\t Application-Name"); + pw.print("\t Application-Type"); + pw.print("\t User\t Queue\t State\t "); + pw.print("Final-State\t Progress"); + pw.println("\t Tracking-URL"); pw.print(" application_1234_0006\t "); pw.print("appname2\t NON-YARN\t user2\t "); pw.print("queue2\t FINISHED\t "); @@ -243,19 +319,46 @@ public class TestYarnCLI { pw.close(); appsReportStr = baos.toString("UTF-8"); Assert.assertEquals(appsReportStr, sysOutStream.toString()); - verify(sysOut, times(3)).write(any(byte[].class), anyInt(), anyInt()); + verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt()); + //Test command yarn application -list --appStates with invalid appStates sysOutStream.reset(); - Set appType4 = new HashSet(); - when(client.getApplications(appType4)).thenReturn( - getApplicationReports(applicationReports, appType4)); - result = cli.run(new String[] { "-list" }); - assertEquals(0, result); - verify(client).getApplications(appType4); - + 
result = + cli.run(new String[] { "-list", "--appStates", "FINISHED ,, , INVALID" }); + assertEquals(-1, result); baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); - pw.println("Total Applications:3"); + pw.println("The application state INVALID is invalid."); + pw.print("The valid application state can be one of the following: "); + StringBuilder sb = new StringBuilder(); + sb.append("ALL,"); + for(YarnApplicationState state : YarnApplicationState.values()) { + sb.append(state+","); + } + String output = sb.toString(); + pw.println(output.substring(0, output.length()-1)); + pw.close(); + appsReportStr = baos.toString("UTF-8"); + Assert.assertEquals(appsReportStr, sysOutStream.toString()); + verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt()); + + //Test command yarn application -list --appStates all + sysOutStream.reset(); + Set appType5 = new HashSet(); + + EnumSet appState5 = + EnumSet.noneOf(YarnApplicationState.class); + appState5.add(YarnApplicationState.FINISHED); + when(client.getApplications(appType5, appState5)).thenReturn( + getApplicationReports(applicationReports, appType5, appState5, true)); + result = + cli.run(new String[] { "-list", "--appStates", "FINISHED ,, , ALL" }); + assertEquals(0, result); + verify(client).getApplications(appType5, appState5); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total number of applications (application-types: " + appType5 + + " and states: " + appState5 + ")" + ":" + 4); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); @@ -263,7 +366,7 @@ public class TestYarnCLI { pw.println("\t Tracking-URL"); pw.print(" application_1234_0005\t "); pw.print("appname\t YARN\t user\t "); - pw.print("queue\t FINISHED\t "); + pw.print("queue\t RUNNING\t "); pw.print("SUCCEEDED\t 53.79%"); pw.println("\t N/A"); pw.print(" application_1234_0006\t "); @@ -273,27 +376,80 @@ public class TestYarnCLI { pw.println("\t N/A"); pw.print(" application_1234_0007\t "); pw.print("appname3\t MAPREDUCE\t user3\t "); - pw.print("queue3\t FINISHED\t "); + pw.print("queue3\t RUNNING\t "); pw.print("SUCCEEDED\t 73.79%"); pw.println("\t N/A"); + pw.print(" application_1234_0008\t "); + pw.print("appname4\t NON-MAPREDUCE\t user4\t "); + pw.print("queue4\t FAILED\t "); + pw.print("SUCCEEDED\t 83.79%"); + pw.println("\t N/A"); pw.close(); appsReportStr = baos.toString("UTF-8"); Assert.assertEquals(appsReportStr, sysOutStream.toString()); - verify(sysOut, times(4)).write(any(byte[].class), anyInt(), anyInt()); + verify(sysOut, times(5)).write(any(byte[].class), anyInt(), anyInt()); + + // Test command yarn application user case insensitive + sysOutStream.reset(); + Set appType6 = new HashSet(); + appType6.add("YARN"); + appType6.add("NON-YARN"); + + EnumSet appState6 = + EnumSet.noneOf(YarnApplicationState.class); + appState6.add(YarnApplicationState.FINISHED); + when(client.getApplications(appType6, appState6)).thenReturn( + getApplicationReports(applicationReports, appType6, appState6, false)); + result = + cli.run(new String[] { "-list", "-appTypes", "YARN, ,, NON-YARN", + "--appStates", "finished" }); + assertEquals(0, result); + verify(client).getApplications(appType6, appState6); + baos = new ByteArrayOutputStream(); + pw = new PrintWriter(baos); + pw.println("Total number of applications (application-types: " + appType6 + + " and states: " + appState6 + ")" + ":" + 1); + pw.print(" Application-Id\t Application-Name"); + pw.print("\t 
Application-Type"); + pw.print("\t User\t Queue\t State\t "); + pw.print("Final-State\t Progress"); + pw.println("\t Tracking-URL"); + pw.print(" application_1234_0006\t "); + pw.print("appname2\t NON-YARN\t user2\t "); + pw.print("queue2\t FINISHED\t "); + pw.print("SUCCEEDED\t 63.79%"); + pw.println("\t N/A"); + pw.close(); + appsReportStr = baos.toString("UTF-8"); + Assert.assertEquals(appsReportStr, sysOutStream.toString()); + verify(sysOut, times(6)).write(any(byte[].class), anyInt(), anyInt()); } private List getApplicationReports( List applicationReports, - Set appTypes) { + Set appTypes, EnumSet appStates, + boolean allStates) { List appReports = new ArrayList(); - boolean bypassFilter = appTypes.isEmpty(); - for (ApplicationReport appReport : applicationReports) { - if (!(bypassFilter || appTypes.contains( - appReport.getApplicationType()))) { - continue; + if (allStates) { + for(YarnApplicationState state : YarnApplicationState.values()) { + appStates.add(state); } + } + for (ApplicationReport appReport : applicationReports) { + if (appTypes != null && !appTypes.isEmpty()) { + if (!appTypes.contains(appReport.getApplicationType())) { + continue; + } + } + + if (appStates != null && !appStates.isEmpty()) { + if (!appStates.contains(appReport.getYarnApplicationState())) { + continue; + } + } + appReports.add(appReport); } return appReports; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java index 48a8d85ab8e..33f74f0903e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetApplicationsRequestPBImpl.java @@ -18,13 +18,18 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import java.util.EnumSet; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils; +import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationsRequestProtoOrBuilder; @@ -38,6 +43,7 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { boolean viaProto = false; Set applicationTypes = null; + EnumSet applicationStates = null; public GetApplicationsRequestPBImpl() { builder = GetApplicationsRequestProto.newBuilder(); @@ -67,6 +73,40 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { if (this.applicationTypes != null) { addLocalApplicationTypesToProto(); } + if (this.applicationStates != null) { + maybeInitBuilder(); + builder.clearApplicationStates(); + Iterable iterable = + new Iterable() { + + @Override + public Iterator iterator() { + return new Iterator() { + + Iterator iter = applicationStates + .iterator(); + + @Override + 
public boolean hasNext() { + return iter.hasNext(); + } + + @Override + public YarnApplicationStateProto next() { + return ProtoUtils.convertToProtoFormat(iter.next()); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + + } + }; + + } + }; + builder.addAllApplicationStates(iterable); + } } private void addLocalApplicationTypesToProto() { @@ -94,6 +134,20 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { this.applicationTypes.addAll(appTypeList); } + private void initApplicationStates() { + if (this.applicationStates != null) { + return; + } + GetApplicationsRequestProtoOrBuilder p = viaProto ? proto : builder; + List appStatesList = + p.getApplicationStatesList(); + this.applicationStates = EnumSet.noneOf(YarnApplicationState.class); + + for (YarnApplicationStateProto c : appStatesList) { + this.applicationStates.add(ProtoUtils.convertFromProtoFormat(c)); + } + } + @Override public Set getApplicationTypes() { initApplicationTypes(); @@ -108,6 +162,21 @@ public class GetApplicationsRequestPBImpl extends GetApplicationsRequest { this.applicationTypes = applicationTypes; } + @Override + public EnumSet getApplicationStates() { + initApplicationStates(); + return this.applicationStates; + } + + @Override + public void setApplicationStates(EnumSet applicationStates) { + maybeInitBuilder(); + if (applicationStates == null) { + builder.clearApplicationStates(); + } + this.applicationStates = applicationStates; + } + @Override public int hashCode() { return getProto().hashCode(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 97f0ef8e0b1..81fdd56b833 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -73,10 +73,12 @@ import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueInfo; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -86,6 +88,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstant import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; @@ -394,7 +397,6 @@ public class ClientRMService extends AbstractService implements @Override public GetApplicationsResponse getApplications( GetApplicationsRequest request) throws YarnException { - UserGroupInformation callerUGI; try { callerUGI = UserGroupInformation.getCurrentUser(); @@ -404,12 +406,22 @@ public class ClientRMService extends AbstractService implements } Set applicationTypes = request.getApplicationTypes(); - boolean bypassFilter = applicationTypes.isEmpty(); + EnumSet applicationStates = + request.getApplicationStates(); + List reports = new ArrayList(); for (RMApp application : this.rmContext.getRMApps().values()) { - if (!(bypassFilter || applicationTypes.contains(application - .getApplicationType()))) { - continue; + if (applicationTypes != null && !applicationTypes.isEmpty()) { + if (!applicationTypes.contains(application.getApplicationType())) { + continue; + } + } + + if (applicationStates != null && !applicationStates.isEmpty()) { + if (!applicationStates.contains(RMServerUtils + .createApplicationState(application.getState()))) { + continue; + } } boolean allowAccess = checkAccess(callerUGI, application.getUser(), ApplicationAccessType.VIEW_APP, application.getApplicationId()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index 241d7bcbd7c..15d306293e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -28,9 +28,12 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.exceptions.InvalidContainerReleaseException; import org.apache.hadoop.yarn.exceptions.InvalidResourceBlacklistRequestException; import org.apache.hadoop.yarn.exceptions.InvalidResourceRequestException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerUtils; @@ -112,4 +115,27 @@ public class RMServerUtils { } } } + + public static YarnApplicationState createApplicationState(RMAppState rmAppState) { + switch(rmAppState) { + case NEW: + return YarnApplicationState.NEW; + case NEW_SAVING: + return YarnApplicationState.NEW_SAVING; + case SUBMITTED: + return YarnApplicationState.SUBMITTED; + case ACCEPTED: + return YarnApplicationState.ACCEPTED; + case RUNNING: + return YarnApplicationState.RUNNING; + case FINISHING: + case FINISHED: + return YarnApplicationState.FINISHED; + case KILLED: + return YarnApplicationState.KILLED; + case FAILED: + return 
YarnApplicationState.FAILED; + } + throw new YarnRuntimeException("Unknown state passed!"); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index a11b05eedc9..79398840ce1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -44,7 +44,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; -import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; @@ -55,6 +54,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent; import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; @@ -378,29 +378,6 @@ public class RMAppImpl implements RMApp, Recoverable { } } - private YarnApplicationState createApplicationState(RMAppState rmAppState) { - switch(rmAppState) { - case NEW: - return YarnApplicationState.NEW; - case NEW_SAVING: - return YarnApplicationState.NEW_SAVING; - case SUBMITTED: - return YarnApplicationState.SUBMITTED; - case ACCEPTED: - return YarnApplicationState.ACCEPTED; - case RUNNING: - return YarnApplicationState.RUNNING; - case FINISHING: - case FINISHED: - return YarnApplicationState.FINISHED; - case KILLED: - return YarnApplicationState.KILLED; - case FAILED: - return YarnApplicationState.FAILED; - } - throw new YarnRuntimeException("Unknown state passed!"); - } - private FinalApplicationStatus createFinalApplicationStatus(RMAppState state) { switch(state) { case NEW: @@ -500,7 +477,7 @@ public class RMAppImpl implements RMApp, Recoverable { return BuilderUtils.newApplicationReport(this.applicationId, currentApplicationAttemptId, this.user, this.queue, this.name, host, rpcPort, clientToAMToken, - createApplicationState(this.stateMachine.getCurrentState()), diags, + RMServerUtils.createApplicationState(this.stateMachine.getCurrentState()), diags, trackingUrl, this.startTime, this.finishTime, finishState, appUsageReport, origTrackingUrl, progress, this.applicationType, amrmToken); From 18e805677d00616997ad7e9c6991bbfb26d73ef4 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sat, 24 Aug 2013 23:32:41 +0000 Subject: [PATCH 070/153] YARN-1094. Fixed a blocker with RM restart code because of which RM crashes when try to recover an existing app. 
Contributed by Vinod Kumar Vavilapalli. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517215 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../GetDelegationTokenRequest.java | 2 +- .../server/resourcemanager/RMContextImpl.java | 12 ++--- .../resourcemanager/ResourceManager.java | 16 ++++-- .../resourcemanager/rmnode/RMNodeImpl.java | 9 +++- .../server/resourcemanager/TestRMRestart.java | 49 +++++++++---------- 6 files changed, 54 insertions(+), 37 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index f208581e627..ebfe2b56486 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -93,6 +93,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1085. Modified YARN and MR2 web-apps to do HTTP authentication in secure setup with kerberos. (Omkar Vinit Joshi via vinodkv) + YARN-1094. Fixed a blocker with RM restart code because of which RM crashes + when try to recover an existing app. (vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java index 3d16d35506f..5268d8f6576 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/GetDelegationTokenRequest.java @@ -33,7 +33,7 @@ public abstract class GetDelegationTokenRequest { @Public @Stable - public GetDelegationTokenRequest newInstance(String renewer) { + public static GetDelegationTokenRequest newInstance(String renewer) { GetDelegationTokenRequest request = Records.newRecord(GetDelegationTokenRequest.class); request.setRenewer(renewer); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index 368f9c49c8e..f40453b5bbe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -56,7 +56,7 @@ public class RMContextImpl implements RMContext { private AMLivelinessMonitor amFinishingMonitor; private RMStateStore stateStore = null; private ContainerAllocationExpirer containerAllocationExpirer; - private final DelegationTokenRenewer tokenRenewer; + private final DelegationTokenRenewer delegationTokenRenewer; private final AMRMTokenSecretManager amRMTokenSecretManager; private final RMContainerTokenSecretManager containerTokenSecretManager; private final NMTokenSecretManagerInRM nmTokenSecretManager; @@ -67,7 +67,7 @@ public class RMContextImpl implements RMContext { ContainerAllocationExpirer containerAllocationExpirer, AMLivelinessMonitor amLivelinessMonitor, AMLivelinessMonitor amFinishingMonitor, - DelegationTokenRenewer tokenRenewer, + DelegationTokenRenewer 
delegationTokenRenewer, AMRMTokenSecretManager amRMTokenSecretManager, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager, @@ -77,7 +77,7 @@ public class RMContextImpl implements RMContext { this.containerAllocationExpirer = containerAllocationExpirer; this.amLivelinessMonitor = amLivelinessMonitor; this.amFinishingMonitor = amFinishingMonitor; - this.tokenRenewer = tokenRenewer; + this.delegationTokenRenewer = delegationTokenRenewer; this.amRMTokenSecretManager = amRMTokenSecretManager; this.containerTokenSecretManager = containerTokenSecretManager; this.nmTokenSecretManager = nmTokenSecretManager; @@ -90,13 +90,13 @@ public class RMContextImpl implements RMContext { ContainerAllocationExpirer containerAllocationExpirer, AMLivelinessMonitor amLivelinessMonitor, AMLivelinessMonitor amFinishingMonitor, - DelegationTokenRenewer tokenRenewer, + DelegationTokenRenewer delegationTokenRenewer, AMRMTokenSecretManager appTokenSecretManager, RMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInRM nmTokenSecretManager, ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager) { this(rmDispatcher, null, containerAllocationExpirer, amLivelinessMonitor, - amFinishingMonitor, tokenRenewer, appTokenSecretManager, + amFinishingMonitor, delegationTokenRenewer, appTokenSecretManager, containerTokenSecretManager, nmTokenSecretManager, clientToAMTokenSecretManager); RMStateStore nullStore = new NullRMStateStore(); @@ -151,7 +151,7 @@ public class RMContextImpl implements RMContext { @Override public DelegationTokenRenewer getDelegationTokenRenewer() { - return tokenRenewer; + return delegationTokenRenewer; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 47d947d20f7..ed30331c8af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -130,6 +130,7 @@ public class ResourceManager extends CompositeService implements Recoverable { protected RMAppManager rmAppManager; protected ApplicationACLsManager applicationACLsManager; protected RMDelegationTokenSecretManager rmDTSecretManager; + private DelegationTokenRenewer delegationTokenRenewer; private WebApp webApp; protected RMContext rmContext; protected ResourceTrackerService resourceTracker; @@ -169,8 +170,10 @@ public class ResourceManager extends CompositeService implements Recoverable { AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor(); addService(amFinishingMonitor); - DelegationTokenRenewer tokenRenewer = createDelegationTokenRenewer(); - addService(tokenRenewer); + if (UserGroupInformation.isSecurityEnabled()) { + this.delegationTokenRenewer = createDelegationTokenRenewer(); + addService(delegationTokenRenewer); + } this.containerTokenSecretManager = createContainerTokenSecretManager(conf); this.nmTokenSecretManager = createNMTokenSecretManager(conf); @@ -201,7 +204,7 @@ public class ResourceManager extends CompositeService implements Recoverable { 
this.rmContext = new RMContextImpl(this.rmDispatcher, rmStore, this.containerAllocationExpirer, amLivelinessMonitor, - amFinishingMonitor, tokenRenewer, this.amRmTokenSecretManager, + amFinishingMonitor, delegationTokenRenewer, this.amRmTokenSecretManager, this.containerTokenSecretManager, this.nmTokenSecretManager, this.clientToAMSecretManager); @@ -610,6 +613,13 @@ public class ResourceManager extends CompositeService implements Recoverable { this.containerTokenSecretManager.start(); this.nmTokenSecretManager.start(); + // Explicitly start DTRenewer too in secure mode before kicking recovery as + // tokens will start getting added for renewal as part of the recovery + // process itself. + if (UserGroupInformation.isSecurityEnabled()) { + this.delegationTokenRenewer.start(); + } + RMStateStore rmStore = rmContext.getStateStore(); // The state store needs to start irrespective of recoveryEnabled as apps // need events to move to further states. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java index 3fc6bf80fac..3158a3b6c2d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNodeImpl.java @@ -35,6 +35,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.net.Node; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; @@ -601,9 +602,13 @@ public class RMNodeImpl implements RMNode, EventHandler { rmNode.context.getDispatcher().getEventHandler().handle( new NodeUpdateSchedulerEvent(rmNode)); } - - rmNode.context.getDelegationTokenRenewer().updateKeepAliveApplications( + + // Update DTRenewer in secure mode to keep these apps alive. Today this is + // needed for log-aggregation to finish long after the apps are gone. 
+ if (UserGroupInformation.isSecurityEnabled()) { + rmNode.context.getDelegationTokenRenewer().updateKeepAliveApplications( statusEvent.getKeepAliveAppIds()); + } return NodeState.RUNNING; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index e667633512d..7977b30db5a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -18,10 +18,10 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; - import java.io.IOException; +import java.net.InetAddress; +import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.HashMap; @@ -35,6 +35,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.DelegationKey; @@ -63,7 +64,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; -import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.log4j.Level; @@ -77,8 +77,11 @@ public class TestRMRestart { private YarnConfiguration conf; + // Fake rmAddr for token-renewal + private static InetSocketAddress rmAddr; + @Before - public void setup() { + public void setup() throws UnknownHostException { Logger rootLogger = LogManager.getRootLogger(); rootLogger.setLevel(Level.DEBUG); ExitUtil.disableSystemExit(); @@ -86,6 +89,8 @@ public class TestRMRestart { UserGroupInformation.setConfiguration(conf); conf.set(YarnConfiguration.RECOVERY_ENABLED, "true"); conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName()); + + rmAddr = new InetSocketAddress(InetAddress.getLocalHost(), 123); } @Test (timeout=180000) @@ -446,6 +451,7 @@ public class TestRMRestart { Token token1 = new Token(dtId1, rm1.getRMDTSecretManager()); + SecurityUtil.setTokenService(token1, rmAddr); ts.addToken(userText1, token1); tokenSet.add(token1); @@ -456,6 +462,7 @@ public class TestRMRestart { Token token2 = new Token(dtId2, rm1.getRMDTSecretManager()); + SecurityUtil.setTokenService(token2, rmAddr); ts.addToken(userText2, token2); tokenSet.add(token2); @@ -575,6 +582,7 @@ public class TestRMRestart { @Test public void testRMDelegationTokenRestoredOnRMRestart() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2); + 
MemoryRMStateStore memStore = new MemoryRMStateStore(); memStore.init(conf); RMState rmState = memStore.getState(); @@ -587,20 +595,21 @@ public class TestRMRestart { rmState.getRMDTSecretManagerState().getMasterKeyState(); MockRM rm1 = new TestSecurityMockRM(conf, memStore); + rm1.start(); // create an empty credential Credentials ts = new Credentials(); // request a token and add into credential - GetDelegationTokenRequest request1 = mock(GetDelegationTokenRequest.class); - when(request1.getRenewer()).thenReturn("renewer1"); + GetDelegationTokenRequest request1 = + GetDelegationTokenRequest.newInstance("renewer1"); GetDelegationTokenResponse response1 = rm1.getClientRMService().getDelegationToken(request1); org.apache.hadoop.yarn.api.records.Token delegationToken1 = response1.getRMDelegationToken(); Token token1 = - ConverterUtils.convertFromYarn(delegationToken1, null); + ConverterUtils.convertFromYarn(delegationToken1, rmAddr); RMDelegationTokenIdentifier dtId1 = token1.decodeIdentifier(); HashSet tokenIdentSet = @@ -632,14 +641,14 @@ public class TestRMRestart { rmState.getRMDTSecretManagerState().getDTSequenceNumber()); // request one more token - GetDelegationTokenRequest request2 = mock(GetDelegationTokenRequest.class); - when(request2.getRenewer()).thenReturn("renewer2"); + GetDelegationTokenRequest request2 = + GetDelegationTokenRequest.newInstance("renewer2"); GetDelegationTokenResponse response2 = rm1.getClientRMService().getDelegationToken(request2); org.apache.hadoop.yarn.api.records.Token delegationToken2 = response2.getRMDelegationToken(); Token token2 = - ConverterUtils.convertFromYarn(delegationToken2, null); + ConverterUtils.convertFromYarn(delegationToken2, rmAddr); RMDelegationTokenIdentifier dtId2 = token2.decodeIdentifier(); // cancel token2 @@ -721,20 +730,10 @@ public class TestRMRestart { } @Override - protected DelegationTokenRenewer createDelegationTokenRenewer() { - return new DelegationTokenRenewer() { - @Override - protected void renewToken(final DelegationTokenToRenew dttr) - throws IOException { - // Do nothing - } - - @Override - protected void setTimerForTokenRenewal(DelegationTokenToRenew token) - throws IOException { - // Do nothing - } - }; + protected void serviceInit(Configuration conf) throws Exception { + super.serviceInit(conf); + RMDelegationTokenIdentifier.Renewer.setSecretManager( + this.getRMDTSecretManager(), rmAddr); } } } From 942e2ebaa54306ffc5b0ffb403e552764a40d58c Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Mon, 26 Aug 2013 15:39:11 +0000 Subject: [PATCH 071/153] YARN-1008. MiniYARNCluster with multiple nodemanagers, all nodes have same key for allocations. 
(tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517563 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../hadoop/yarn/conf/YarnConfiguration.java | 9 ++++ .../scheduler/AppSchedulingInfo.java | 2 +- .../scheduler/SchedulerNode.java | 14 ++++-- .../scheduler/capacity/CapacityScheduler.java | 7 ++- .../CapacitySchedulerConfiguration.java | 5 ++ .../scheduler/capacity/LeafQueue.java | 8 +-- .../common/fica/FiCaSchedulerNode.java | 12 +++-- .../common/fica/FiCaSchedulerUtils.java | 4 +- .../scheduler/fair/AppSchedulable.java | 6 +-- .../scheduler/fair/FSSchedulerNode.java | 12 +++-- .../scheduler/fair/FairScheduler.java | 7 +-- .../fair/FairSchedulerConfiguration.java | 7 ++- .../scheduler/fifo/FifoScheduler.java | 9 +++- .../server/resourcemanager/MockNodes.java | 14 ++++-- .../server/resourcemanager/NodeManager.java | 2 +- .../capacity/TestChildQueueOrder.java | 3 +- .../scheduler/capacity/TestParentQueue.java | 2 +- .../scheduler/capacity/TestUtils.java | 2 +- .../scheduler/fair/TestFairScheduler.java | 50 +++++++++++++++++++ .../hadoop/yarn/server/MiniYARNCluster.java | 15 ++++++ 21 files changed, 156 insertions(+), 37 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ebfe2b56486..5b830b0d8c7 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -96,6 +96,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1094. Fixed a blocker with RM restart code because of which RM crashes when try to recover an existing app. (vinodkv) + YARN-1008. MiniYARNCluster with multiple nodemanagers, all nodes have same + key for allocations. (tucu) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index f57091e4380..904b4d57b7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -132,6 +132,15 @@ public class YarnConfiguration extends Configuration { RM_PREFIX + "scheduler.client.thread-count"; public static final int DEFAULT_RM_SCHEDULER_CLIENT_THREAD_COUNT = 50; + /** If the port should be included or not in the node name. The node name + * is used by the scheduler for resource requests allocation location + * matching. Typically this is just the hostname, using the port is needed + * when using minicluster and specific NM are required.*/ + public static final String RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME = + YARN_PREFIX + "scheduler.include-port-in-node-name"; + public static final boolean DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME = + false; + /** * Enable periodic monitor threads. 
* @see #RM_SCHEDULER_MONITOR_POLICIES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index 1ff00be4ce6..6f8144d4c69 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -281,7 +281,7 @@ public class AppSchedulingInfo { // Update future requirements nodeLocalRequest.setNumContainers(nodeLocalRequest.getNumContainers() - 1); if (nodeLocalRequest.getNumContainers() == 0) { - this.requests.get(priority).remove(node.getHostName()); + this.requests.get(priority).remove(node.getNodeName()); } ResourceRequest rackLocalRequest = requests.get(priority).get( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index a08ba7090ab..8a80bf8cf9a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.conf.YarnConfiguration; /** * Represents a YARN Cluster Node from the viewpoint of the scheduler. @@ -30,10 +31,17 @@ import org.apache.hadoop.yarn.api.records.Resource; public abstract class SchedulerNode { /** - * Get hostname. - * @return hostname + * Get the name of the node for scheduling matching decisions. + *

+ * Typically this is the 'hostname' reported by the node, but it could be + * configured to be 'hostname:port' reported by the node via the + * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} constant. + * The main usecase of this is Yarn minicluster to be able to differentiate + * node manager instances by their port number. + * + * @return name of the node for scheduling matching decisions. */ - public abstract String getHostName(); + public abstract String getNodeName(); /** * Get rackname. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 29c4d4b9de3..2efb9ad6719 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -185,7 +185,8 @@ public class CapacityScheduler private boolean initialized = false; private ResourceCalculator calculator; - + private boolean usePortForNodeName; + public CapacityScheduler() {} @Override @@ -256,6 +257,7 @@ public class CapacityScheduler this.minimumAllocation = this.conf.getMinimumAllocation(); this.maximumAllocation = this.conf.getMaximumAllocation(); this.calculator = this.conf.getResourceCalculator(); + this.usePortForNodeName = this.conf.getUsePortForNodeName(); this.rmContext = rmContext; @@ -759,7 +761,8 @@ public class CapacityScheduler } private synchronized void addNode(RMNode nodeManager) { - this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager)); + this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager, + usePortForNodeName)); Resources.addTo(clusterResource, nodeManager.getTotalCapability()); root.updateClusterResource(clusterResource); ++numNodeManagers; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index 6d209ca1a7a..6fceabf0dec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -338,6 +338,11 @@ public class CapacitySchedulerConfiguration extends Configuration { this); } + public boolean getUsePortForNodeName() { + return getBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, + YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME); + } + public void setResourceComparator( Class resourceCalculatorClass) { setClass( diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index c2c5d27576d..41b3f5e3037 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -801,7 +801,7 @@ public class LeafQueue implements CSQueue { assignContainers(Resource clusterResource, FiCaSchedulerNode node) { if(LOG.isDebugEnabled()) { - LOG.debug("assignContainers: node=" + node.getHostName() + LOG.debug("assignContainers: node=" + node.getNodeName() + " #applications=" + activeApplications.size()); } @@ -1130,7 +1130,7 @@ public class LeafQueue implements CSQueue { // Data-local ResourceRequest nodeLocalResourceRequest = - application.getResourceRequest(priority, node.getHostName()); + application.getResourceRequest(priority, node.getNodeName()); if (nodeLocalResourceRequest != null) { assigned = assignNodeLocalContainers(clusterResource, nodeLocalResourceRequest, @@ -1257,7 +1257,7 @@ public class LeafQueue implements CSQueue { if (type == NodeType.NODE_LOCAL) { // Now check if we need containers on this host... ResourceRequest nodeLocalRequest = - application.getResourceRequest(priority, node.getHostName()); + application.getResourceRequest(priority, node.getNodeName()); if (nodeLocalRequest != null) { return nodeLocalRequest.getNumContainers() > 0; } @@ -1302,7 +1302,7 @@ public class LeafQueue implements CSQueue { FiCaSchedulerApp application, Priority priority, ResourceRequest request, NodeType type, RMContainer rmContainer) { if (LOG.isDebugEnabled()) { - LOG.debug("assignContainers: node=" + node.getHostName() + LOG.debug("assignContainers: node=" + node.getNodeName() + " application=" + application.getApplicationId().getId() + " priority=" + priority.getPriority() + " request=" + request + " type=" + type); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java index bb9ba92e0ee..7a306ec4281 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java @@ -59,11 +59,17 @@ public class FiCaSchedulerNode extends SchedulerNode { new HashMap(); private final RMNode rmNode; + private final String nodeName; - public FiCaSchedulerNode(RMNode node) { + public FiCaSchedulerNode(RMNode node, boolean usePortForNodeName) { this.rmNode = node; this.availableResource.setMemory(node.getTotalCapability().getMemory()); 
this.availableResource.setVirtualCores(node.getTotalCapability().getVirtualCores()); + if (usePortForNodeName) { + nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort(); + } else { + nodeName = rmNode.getHostName(); + } } public RMNode getRMNode() { @@ -79,8 +85,8 @@ public class FiCaSchedulerNode extends SchedulerNode { } @Override - public String getHostName() { - return this.rmNode.getHostName(); + public String getNodeName() { + return nodeName; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java index 1e96949787c..9bece9ba50e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerUtils.java @@ -24,9 +24,9 @@ public class FiCaSchedulerUtils { public static boolean isBlacklisted(FiCaSchedulerApp application, FiCaSchedulerNode node, Log LOG) { - if (application.isBlacklisted(node.getHostName())) { + if (application.isBlacklisted(node.getNodeName())) { if (LOG.isDebugEnabled()) { - LOG.debug("Skipping 'host' " + node.getHostName() + + LOG.debug("Skipping 'host' " + node.getNodeName() + " for " + application.getApplicationId() + " since it has been blacklisted"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java index bb3190bafa9..14ec99cada5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java @@ -185,7 +185,7 @@ public class AppSchedulable extends Schedulable { */ private void reserve(Priority priority, FSSchedulerNode node, Container container, boolean alreadyReserved) { - LOG.info("Making reservation: node=" + node.getHostName() + + LOG.info("Making reservation: node=" + node.getNodeName() + " app_id=" + app.getApplicationId()); if (!alreadyReserved) { getMetrics().reserveResource(app.getUser(), container.getResource()); @@ -309,7 +309,7 @@ public class AppSchedulable extends Schedulable { ResourceRequest rackLocalRequest = app.getResourceRequest(priority, node.getRackName()); ResourceRequest localRequest = app.getResourceRequest(priority, - node.getHostName()); + node.getNodeName()); if (localRequest != null && !localRequest.getRelaxLocality()) { LOG.warn("Relax locality off is not supported on local request: " @@ -369,7 +369,7 @@ public class AppSchedulable extends Schedulable { public boolean hasContainerForNode(Priority prio, FSSchedulerNode 
node) { ResourceRequest anyRequest = app.getResourceRequest(prio, ResourceRequest.ANY); ResourceRequest rackRequest = app.getResourceRequest(prio, node.getRackName()); - ResourceRequest nodeRequest = app.getResourceRequest(prio, node.getHostName()); + ResourceRequest nodeRequest = app.getResourceRequest(prio, node.getNodeName()); return // There must be outstanding requests at the given priority: diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java index cc15a5d9c7c..bd29f821bb4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java @@ -63,10 +63,16 @@ public class FSSchedulerNode extends SchedulerNode { new HashMap(); private final RMNode rmNode; + private final String nodeName; - public FSSchedulerNode(RMNode node) { + public FSSchedulerNode(RMNode node, boolean usePortForNodeName) { this.rmNode = node; this.availableResource = Resources.clone(node.getTotalCapability()); + if (usePortForNodeName) { + nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort(); + } else { + nodeName = rmNode.getHostName(); + } } public RMNode getRMNode() { @@ -82,8 +88,8 @@ public class FSSchedulerNode extends SchedulerNode { } @Override - public String getHostName() { - return rmNode.getHostName(); + public String getNodeName() { + return nodeName; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index b86b031ecf6..7f315781ef9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -35,7 +35,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; -import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; @@ -122,6 +121,7 @@ public class FairScheduler implements ResourceScheduler { private Resource incrAllocation; private QueueManager queueMgr; private Clock clock; + private boolean usePortForNodeName; private static final Log LOG = LogFactory.getLog(FairScheduler.class); @@ -751,7 +751,7 @@ public class FairScheduler implements ResourceScheduler { } private synchronized void 
addNode(RMNode node) { - nodes.put(node.getNodeID(), new FSSchedulerNode(node)); + nodes.put(node.getNodeID(), new FSSchedulerNode(node, usePortForNodeName)); Resources.addTo(clusterCapacity, node.getTotalCapability()); updateRootQueueMetrics(); @@ -1065,7 +1065,8 @@ public class FairScheduler implements ResourceScheduler { sizeBasedWeight = this.conf.getSizeBasedWeight(); preemptionInterval = this.conf.getPreemptionInterval(); waitTimeBeforeKill = this.conf.getWaitTimeBeforeKill(); - + usePortForNodeName = this.conf.getUsePortForNodeName(); + if (!initialized) { rootMetrics = FSQueueMetrics.forQueue("root", null, true, conf); this.rmContext = rmContext; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java index 0ab82638dd9..acdd40e26ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java @@ -166,7 +166,12 @@ public class FairSchedulerConfiguration extends Configuration { public int getWaitTimeBeforeKill() { return getInt(WAIT_TIME_BEFORE_KILL, DEFAULT_WAIT_TIME_BEFORE_KILL); } - + + public boolean getUsePortForNodeName() { + return getBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, + YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME); + } + /** * Parses a resource config value of a form like "1024", "1024 mb", * or "1024 mb, 3 vcores". If no units are given, megabytes are assumed. 
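
The scheduler-side changes above (CapacityScheduler, FifoScheduler, FairScheduler and their FiCaSchedulerNode/FSSchedulerNode classes) all hinge on one naming rule: with yarn.scheduler.include-port-in-node-name enabled, locality matching uses "hostname:port" instead of the bare hostname. The following is a minimal sketch of that rule, not part of the patch; it uses only the YarnConfiguration constants introduced here, and the class and method names are illustrative.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Sketch only: mirrors the nodeName logic added to FiCaSchedulerNode and
// FSSchedulerNode in this patch. Class and method names are illustrative.
public class NodeNameSketch {

  // Name the scheduler uses when matching node-local resource requests.
  static String schedulerNodeName(Configuration conf, String hostName, int port) {
    boolean usePort = conf.getBoolean(
        YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME,
        YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME);
    return usePort ? hostName + ":" + port : hostName;
  }

  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();
    // Default: two NodeManagers on the same host are indistinguishable.
    System.out.println(schedulerNodeName(conf, "127.0.0.1", 1)); // 127.0.0.1
    System.out.println(schedulerNodeName(conf, "127.0.0.1", 2)); // 127.0.0.1
    // With the flag on, the port disambiguates them.
    conf.setBoolean(YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true);
    System.out.println(schedulerNodeName(conf, "127.0.0.1", 1)); // 127.0.0.1:1
    System.out.println(schedulerNodeName(conf, "127.0.0.1", 2)); // 127.0.0.1:2
  }
}

With the flag on, a node-local request must therefore name "127.0.0.1:1" rather than "127.0.0.1", which is exactly what the new testHostPortNodeName test further below exercises.
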
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index d971f3b4496..115d2089c34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -111,6 +111,7 @@ public class FifoScheduler implements ResourceScheduler, Configurable { private boolean initialized; private Resource minimumAllocation; private Resource maximumAllocation; + private boolean usePortForNodeName; private Map applications = new TreeMap(); @@ -233,6 +234,9 @@ public class FifoScheduler implements ResourceScheduler, Configurable { Resources.createResource(conf.getInt( YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)); + this.usePortForNodeName = conf.getBoolean( + YarnConfiguration.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, + YarnConfiguration.DEFAULT_RM_SCHEDULER_USE_PORT_FOR_NODE_NAME); this.metrics = QueueMetrics.forQueue(DEFAULT_QUEUE_NAME, null, false, conf); this.activeUsersManager = new ActiveUsersManager(metrics); @@ -490,7 +494,7 @@ public class FifoScheduler implements ResourceScheduler, Configurable { FiCaSchedulerApp application, Priority priority) { int assignedContainers = 0; ResourceRequest request = - application.getResourceRequest(priority, node.getHostName()); + application.getResourceRequest(priority, node.getNodeName()); if (request != null) { // Don't allocate on this node if we don't need containers on this rack ResourceRequest rackRequest = @@ -801,7 +805,8 @@ public class FifoScheduler implements ResourceScheduler, Configurable { } private synchronized void addNode(RMNode nodeManager) { - this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager)); + this.nodes.put(nodeManager.getNodeID(), new FiCaSchedulerNode(nodeManager, + usePortForNodeName)); Resources.addTo(clusterResource, nodeManager.getTotalCapability()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 83b81a18504..d69828d0fc4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -200,15 +200,14 @@ public class MockNodes { }; private static RMNode buildRMNode(int rack, final Resource perNode, NodeState state, String httpAddr) { - return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++, null); + return buildRMNode(rack, perNode, state, httpAddr, NODE_ID++, null, 123); } private static RMNode buildRMNode(int rack, final Resource perNode, - NodeState 
state, String httpAddr, int hostnum, String hostName) { + NodeState state, String httpAddr, int hostnum, String hostName, int port) { final String rackName = "rack"+ rack; final int nid = hostnum; final String nodeAddr = hostName + ":" + nid; - final int port = 123; if (hostName == null) { hostName = "host"+ nid; } @@ -230,12 +229,17 @@ public class MockNodes { } public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum) { - return buildRMNode(rack, perNode, null, "localhost:0", hostnum, null); + return buildRMNode(rack, perNode, null, "localhost:0", hostnum, null, 123); } public static RMNode newNodeInfo(int rack, final Resource perNode, int hostnum, String hostName) { - return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName); + return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName, 123); + } + + public static RMNode newNodeInfo(int rack, final Resource perNode, + int hostnum, String hostName, int port) { + return buildRMNode(rack, perNode, null, "localhost:0", hostnum, hostName, port); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java index 2c9d67845a4..f943101e1cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java @@ -101,7 +101,7 @@ public class NodeManager implements ContainerManagementProtocol { request.setNodeId(this.nodeId); resourceTrackerService.registerNodeManager(request); this.schedulerNode = new FiCaSchedulerNode(rmContext.getRMNodes().get( - this.nodeId)); + this.nodeId), false); // Sanity check Assert.assertEquals(capability.getMemory(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java index 014385c12eb..3c55b42006f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java @@ -26,7 +26,6 @@ import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.spy; import static org.mockito.Mockito.when; import java.util.HashMap; @@ -126,7 +125,7 @@ public class TestChildQueueOrder { throw new Exception(); } catch (Exception e) { LOG.info("FOOBAR q.assignContainers q=" + queue.getQueueName() + - " alloc=" + allocation + " node=" + node.getHostName()); + " alloc=" + allocation + " node=" + node.getNodeName()); } final Resource 
allocatedResource = Resources.createResource(allocation); if (queue instanceof ParentQueue) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index c5dbfde5d3d..03480810083 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -138,7 +138,7 @@ public class TestParentQueue { throw new Exception(); } catch (Exception e) { LOG.info("FOOBAR q.assignContainers q=" + queue.getQueueName() + - " alloc=" + allocation + " node=" + node.getHostName()); + " alloc=" + allocation + " node=" + node.getNodeName()); } final Resource allocatedResource = Resources.createResource(allocation); if (queue instanceof ParentQueue) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 6e7fe789826..b974528a3cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -160,7 +160,7 @@ public class TestUtils { when(rmNode.getHostName()).thenReturn(host); when(rmNode.getRackName()).thenReturn(rack); - FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode)); + FiCaSchedulerNode node = spy(new FiCaSchedulerNode(rmNode, false)); LOG.info("node = " + host + " avail=" + node.getAvailableResource()); return node; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 12c40b9d264..17ff7ded850 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -2146,4 +2146,54 @@ public class TestFairScheduler { Assert.assertEquals(2, app3.getLiveContainers().size()); Assert.assertEquals(2, app4.getLiveContainers().size()); } + + @Test(timeout = 30000) + public void testHostPortNodeName() throws Exception { + scheduler.getConf().setBoolean(YarnConfiguration + 
.RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME, true); + scheduler.reinitialize(scheduler.getConf(), + resourceManager.getRMContext()); + RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024), + 1, "127.0.0.1", 1); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024), + 2, "127.0.0.1", 2); + NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); + scheduler.handle(nodeEvent2); + + ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", + "user1", 0); + + ResourceRequest nodeRequest = createResourceRequest(1024, + node1.getNodeID().getHost() + ":" + node1.getNodeID().getPort(), 1, + 1, true); + ResourceRequest rackRequest = createResourceRequest(1024, + node1.getRackName(), 1, 1, false); + ResourceRequest anyRequest = createResourceRequest(1024, + ResourceRequest.ANY, 1, 1, false); + createSchedulingRequestExistingApplication(nodeRequest, attId1); + createSchedulingRequestExistingApplication(rackRequest, attId1); + createSchedulingRequestExistingApplication(anyRequest, attId1); + + scheduler.update(); + + NodeUpdateSchedulerEvent node1UpdateEvent = new + NodeUpdateSchedulerEvent(node1); + NodeUpdateSchedulerEvent node2UpdateEvent = new + NodeUpdateSchedulerEvent(node2); + + // no matter how many heartbeats, node2 should never get a container + FSSchedulerApp app = scheduler.applications.get(attId1); + for (int i = 0; i < 10; i++) { + scheduler.handle(node2UpdateEvent); + assertEquals(0, app.getLiveContainers().size()); + assertEquals(0, app.getReservedContainers().size()); + } + // then node1 should get the container + scheduler.handle(node1UpdateEvent); + assertEquals(1, app.getLiveContainers().size()); + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java index cba27e4b323..1281c2409bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java @@ -53,6 +53,21 @@ import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService; +/** + * Embedded Yarn minicluster for testcases that need to interact with a cluster. + *

+ * In a real cluster, resource request matching is done using the hostname, and + * by default the YARN minicluster works in exactly the same way as a real cluster. + *

+ * If a testcase needs to use multiple nodes and exercise resource request + * matching to a specific node, then the property + * {@link YarnConfiguration#RM_SCHEDULER_INCLUDE_PORT_IN_NODE_NAME} should be set + * to true in the configuration used to initialize the minicluster. + *

+ * With this property set to true, the matching will be done using + * the hostname:port of the namenodes. In such case, the AM must + * do resource request using hostname:port as the location. + */ public class MiniYARNCluster extends CompositeService { private static final Log LOG = LogFactory.getLog(MiniYARNCluster.class); From e7154d7855a551656e373f6deda1ddc0e59b796b Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Mon, 26 Aug 2013 19:52:10 +0000 Subject: [PATCH 072/153] YARN-1093. Corrections to Fair Scheduler documentation (Wing Yew Poon via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517658 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../src/site/apt/FairScheduler.apt.vm | 29 ++++++++++++------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 5b830b0d8c7..5c7db3fe58d 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -48,6 +48,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1074. Cleaned up YARN CLI application list to only display running applications by default. (Xuan Gong via vinodkv) + YARN-1093. Corrections to Fair Scheduler documentation (Wing Yew Poon via + Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm index a5adb4e838c..37b63e5962c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm @@ -47,7 +47,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler The scheduler organizes apps further into "queues", and shares resources fairly between these queues. By default, all users share a single queue, - called “default”. If an app specifically lists a queue in a container resource + named “default”. If an app specifically lists a queue in a container resource request, the request is submitted to that queue. It is also possible to assign queues based on the user name included with the request through configuration. Within each queue, a scheduling policy is used to share @@ -85,7 +85,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler their parents in the fair scheduler configuration file. A queue's name starts with the names of its parents, with periods as - separators. So a queue named "queue1" under the root named, would be referred + separators. So a queue named "queue1" under the root queue, would be referred to as "root.queue1", and a queue named "queue2" under a queue named "parent1" would be referred to as "root.parent1.queue2". When referring to queues, the root part of the name is optional, so queue1 could be referred to as just @@ -118,22 +118,23 @@ Hadoop MapReduce Next Generation - Fair Scheduler Customizing the Fair Scheduler typically involves altering two files. First, scheduler-wide options can be set by adding configuration properties in the - fair-scheduler.xml file in your existing configuration directory. Second, in + yarn-site.xml file in your existing configuration directory. Second, in most cases users will want to create a manifest file listing which queues exist and their respective weights and capacities. The location of this file - is flexible - but it must be declared in fair-scheduler.xml. + is flexible - but it must be declared in yarn-site.xml. 
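As a rough illustration of the two-file setup described above, a client or test could select the Fair Scheduler and declare the allocation file programmatically. This is only a sketch: the allocation-file property name and the path shown are assumptions for illustration and are not taken from this patch. The individual properties are described next.

    // Sketch only: choose the Fair Scheduler and point it at an allocation file.
    // The allocation-file property name and the path are assumptions.
    Configuration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_SCHEDULER,
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler");
    conf.set("yarn.scheduler.fair.allocation.file",
        "/etc/hadoop/conf/fair-scheduler.xml");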
* <<>> * Path to allocation file. An allocation file is an XML manifest describing queues and their properties, in addition to certain policy defaults. This file must be in XML format as described in the next section. + Defaults to fair-scheduler.xml in configuration directory. * <<>> * Whether to use the username associated with the allocation as the default queue name, in the event that a queue name is not specified. If this is set - to "false" or unset, all jobs have a shared default queue, called "default". + to "false" or unset, all jobs have a shared default queue, named "default". Defaults to true. * <<>> @@ -178,14 +179,15 @@ Hadoop MapReduce Next Generation - Fair Scheduler Allocation file format - The allocation file must be in XML format. The format contains four types of + The allocation file must be in XML format. The format contains five types of elements: * <>, which represent queues. Each may contain the following properties: * minResources: minimum resources the queue is entitled to, in the form - "X mb, Y vcores". If a queue's minimum share is not satisfied, it will be + "X mb, Y vcores". For the single-resource fairness policy, the vcores + value is ignored. If a queue's minimum share is not satisfied, it will be offered available resources before any other queue under the same parent. Under the single-resource fairness policy, a queue is considered unsatisfied if its memory usage is below its minimum memory @@ -199,7 +201,8 @@ Allocation file format may be using those resources. * maxResources: maximum resources a queue is allowed, in the form - "X mb, Y vcores". A queue will never be assigned a container that would + "X mb, Y vcores". For the single-resource fairness policy, the vcores + value is ignored. A queue will never be assigned a container that would put its aggregate usage over this limit. * maxRunningApps: limit the number of apps from the queue to run at once @@ -234,19 +237,23 @@ Allocation file format its fair share before it will try to preempt containers to take resources from other queues. + * <>, which sets the default scheduling + policy for queues; overriden by the schedulingPolicy element in each queue + if specified. Defaults to "fair". + An example allocation file is given here: --- - 10000 mb - 90000 mb + 10000 mb,0vcores + 90000 mb,0vcores 50 2.0 fair - 5000 mb + 5000 mb,0vcores From f9f7b792fb600df9740e9d996183d67b3148d126 Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Mon, 26 Aug 2013 20:47:54 +0000 Subject: [PATCH 073/153] YARN-942. In Fair Scheduler documentation, inconsistency on which properties have prefix (Akira Ajisaka via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517691 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 5c7db3fe58d..140e97ca133 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -51,6 +51,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1093. Corrections to Fair Scheduler documentation (Wing Yew Poon via Sandy Ryza) + YARN-942. 
In Fair Scheduler documentation, inconsistency on which + properties have prefix (Akira Ajisaka via Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm index 37b63e5962c..5d8083423f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm @@ -159,7 +159,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler * If assignmultiple is true, the maximum amount of containers that can be assigned in one heartbeat. Defaults to -1, which sets no limit. - * <<>> + * <<>> * For applications that request containers on particular nodes, the number of scheduling opportunities since the last container assignment to wait before @@ -168,7 +168,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler opportunities to pass up. The default value of -1.0 means don't pass up any scheduling opportunities. - * <<>> + * <<>> * For applications that request containers on particular racks, the number of scheduling opportunities since the last container assignment to wait before From 5adba5597ce071c2e84d0c9834e1d9e5e76f9bdb Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 26 Aug 2013 22:59:09 +0000 Subject: [PATCH 074/153] YARN-1085. Addendum patch to address issues with the earlier patch. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517721 13f79535-47bb-0310-9956-ffa450edef68 --- .../mapreduce/v2/jobhistory/JHAdminConfig.java | 9 +++++++++ .../mapreduce/v2/hs/HistoryClientService.java | 4 ++-- .../apache/hadoop/yarn/conf/YarnConfiguration.java | 14 ++++++-------- .../org/apache/hadoop/yarn/webapp/WebApps.java | 6 ++++-- .../server/resourcemanager/ResourceManager.java | 2 +- 5 files changed, 22 insertions(+), 13 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java index 1236d574ed5..a955eaa3c23 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java @@ -129,6 +129,15 @@ public class JHAdminConfig { public static final int DEFAULT_MR_HISTORY_WEBAPP_PORT = 19888; public static final String DEFAULT_MR_HISTORY_WEBAPP_ADDRESS = "0.0.0.0:" + DEFAULT_MR_HISTORY_WEBAPP_PORT; + + /**The kerberos principal to be used for spnego filter for history server*/ + public static final String MR_WEBAPP_SPNEGO_USER_NAME_KEY = + MR_HISTORY_PREFIX + "webapp.spnego-principal"; + + /** The kerberos keytab to be used for spnego filter for history server*/ + public static final String MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = + MR_HISTORY_PREFIX + "webapp.spnego-keytab-file"; + /* * HS Service Authorization */ diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java index 2f0f2c2c2a5..87fb1ed41ee 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryClientService.java @@ -153,9 +153,9 @@ public class HistoryClientService extends AbstractService { .$for("jobhistory", HistoryClientService.class, this, "ws") .with(conf) .withHttpSpnegoKeytabKey( - YarnConfiguration.JHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) + JHAdminConfig.MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) .withHttpSpnegoPrincipalKey( - YarnConfiguration.JHS_WEBAPP_SPNEGO_USER_NAME_KEY) + JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY) .at(NetUtils.getHostPortString(bindAddress)).start(webApp); conf.updateConnectAddr(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS, webApp.getListenerAddress()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 904b4d57b7f..febf095d227 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -214,11 +214,13 @@ public class YarnConfiguration extends Configuration { public static final String RM_KEYTAB = RM_PREFIX + "keytab"; + /**The kerberos principal to be used for spnego filter for RM.*/ public static final String RM_WEBAPP_SPNEGO_USER_NAME_KEY = RM_PREFIX + "webapp.spnego-principal"; - public static final String RM_WEBAPP_SPENGO_KEYTAB_FILE_KEY = - RM_PREFIX + "webapp.spengo-keytab-file"; + /**The kerberos keytab to be used for spnego filter for RM.*/ + public static final String RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = + RM_PREFIX + "webapp.spnego-keytab-file"; /** How long to wait until a container is considered dead.*/ public static final String RM_CONTAINER_ALLOC_EXPIRY_INTERVAL_MS = @@ -615,9 +617,11 @@ public class YarnConfiguration extends Configuration { public static final String NM_USER_HOME_DIR = NM_PREFIX + "user-home-dir"; + /**The kerberos principal to be used for spnego filter for NM.*/ public static final String NM_WEBAPP_SPNEGO_USER_NAME_KEY = NM_PREFIX + "webapp.spnego-principal"; + /**The kerberos keytab to be used for spnego filter for NM.*/ public static final String NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = NM_PREFIX + "webapp.spnego-keytab-file"; @@ -750,12 +754,6 @@ public class YarnConfiguration extends Configuration { // Other Configs //////////////////////////////// - public static final String JHS_WEBAPP_SPNEGO_USER_NAME_KEY = - "jobhistoryserver.webapp.spnego-principal"; - - public static final String JHS_WEBAPP_SPNEGO_KEYTAB_FILE_KEY = - "jobhistoryserver.webapp.spnego-keytab-file"; - /** * The interval of the yarn client's querying application state after * application submission. The unit is millisecond. 
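A minimal sketch of how the per-daemon SPNEGO keys introduced above might be populated for a secure history server; the principal and keytab values are placeholders, not taken from this patch.

    // Sketch only: placeholder values, assuming a Kerberized deployment.
    Configuration conf = new Configuration();
    conf.set(JHAdminConfig.MR_WEBAPP_SPNEGO_USER_NAME_KEY,
        "HTTP/_HOST@EXAMPLE.COM");
    conf.set(JHAdminConfig.MR_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
        "/etc/security/keytabs/spnego.service.keytab");
    // WebApps enables the SPNEGO filter only when both resolved values are
    // non-empty, which is the check tightened in the WebApps change below.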
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 87622c2b4f6..1a093a39cc6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -217,11 +217,13 @@ public class WebApps { { if (UserGroupInformation.isSecurityEnabled()) { boolean initSpnego = true; - if (spnegoPrincipalKey == null || spnegoPrincipalKey.isEmpty()) { + if (spnegoPrincipalKey == null + || conf.get(spnegoPrincipalKey, "").isEmpty()) { LOG.warn("Principal for spnego filter is not set"); initSpnego = false; } - if (spnegoKeytabKey == null || spnegoKeytabKey.isEmpty()) { + if (spnegoKeytabKey == null + || conf.get(spnegoKeytabKey, "").isEmpty()) { LOG.warn("Keytab for spnego filter is not set"); initSpnego = false; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index ed30331c8af..95e3207e4b2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -584,7 +584,7 @@ public class ResourceManager extends CompositeService implements Recoverable { .withHttpSpnegoPrincipalKey( YarnConfiguration.RM_WEBAPP_SPNEGO_USER_NAME_KEY) .withHttpSpnegoKeytabKey( - YarnConfiguration.RM_WEBAPP_SPENGO_KEYTAB_FILE_KEY) + YarnConfiguration.RM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY) .at(this.conf.get(YarnConfiguration.RM_WEBAPP_ADDRESS, YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS)); String proxyHostAndPort = YarnConfiguration.getProxyHostAndPort(conf); From 39252995c4d734e993e3fa5338e1a7816aee86fc Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Tue, 27 Aug 2013 19:21:15 +0000 Subject: [PATCH 075/153] HDFS-3245. Add metrics and web UI for cluster version summary. Contributed by Ravi Prakash. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517937 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hadoop/hdfs/protocol/DatanodeInfo.java | 9 +++ .../blockmanagement/DatanodeManager.java | 79 ++++++++++++++++++- .../server/namenode/ClusterJspHelper.java | 3 + .../hdfs/server/namenode/FSNamesystem.java | 17 ++++ .../hdfs/server/namenode/NameNodeMXBean.java | 25 +++++- .../server/namenode/NamenodeJspHelper.java | 23 +++++- .../src/main/webapps/hdfs/dfshealth.jsp | 1 + 8 files changed, 155 insertions(+), 5 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 644699f67f9..bb937935bdd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -333,6 +333,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5045. Add more unit tests for retry cache to cover all AtMostOnce methods. (jing9) + HDFS-3245. Add metrics and web UI for cluster version summary. 
(Ravi + Prakash via kihwal) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java index 5172bc59f24..3f5715b0afd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java @@ -47,6 +47,7 @@ public class DatanodeInfo extends DatanodeID implements Node { private long lastUpdate; private int xceiverCount; private String location = NetworkTopology.DEFAULT_RACK; + private String softwareVersion; // Datanode administrative states public enum AdminStates { @@ -383,4 +384,12 @@ public class DatanodeInfo extends DatanodeID implements Node { // by DatanodeID return (this == obj) || super.equals(obj); } + + public String getSoftwareVersion() { + return softwareVersion; + } + + public void setSoftwareVersion(String softwareVersion) { + this.softwareVersion = softwareVersion; + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index 9d5024fb9b1..f9b7d6edbd2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -26,6 +26,7 @@ import java.net.UnknownHostException; import java.util.ArrayList; import java.util.Arrays; import java.util.Comparator; +import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.NavigableMap; @@ -64,7 +65,6 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException; import org.apache.hadoop.hdfs.server.protocol.RegisterCommand; import org.apache.hadoop.hdfs.util.CyclicIteration; -import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.CachedDNSToSwitchMapping; import org.apache.hadoop.net.DNSToSwitchMapping; @@ -165,6 +165,14 @@ public class DatanodeManager { * according to the NetworkTopology. */ private boolean hasClusterEverBeenMultiRack = false; + + /** + * The number of datanodes for each software version. This list should change + * during rolling upgrades. + * Software version -> Number of datanodes with this version + */ + private HashMap datanodesSoftwareVersions = + new HashMap(4, 0.75f); DatanodeManager(final BlockManager blockManager, final Namesystem namesystem, final Configuration conf) throws IOException { @@ -456,6 +464,7 @@ public class DatanodeManager { heartbeatManager.removeDatanode(nodeInfo); blockManager.removeBlocksAssociatedTo(nodeInfo); networktopology.remove(nodeInfo); + decrementVersionCount(nodeInfo.getSoftwareVersion()); if (LOG.isDebugEnabled()) { LOG.debug("remove datanode " + nodeInfo); @@ -538,6 +547,61 @@ public class DatanodeManager { } } + private void incrementVersionCount(String version) { + if (version == null) { + return; + } + synchronized(datanodeMap) { + Integer count = this.datanodesSoftwareVersions.get(version); + count = count == null ? 
1 : count + 1; + this.datanodesSoftwareVersions.put(version, count); + } + } + + private void decrementVersionCount(String version) { + if (version == null) { + return; + } + synchronized(datanodeMap) { + Integer count = this.datanodesSoftwareVersions.get(version); + if(count != null) { + if(count > 1) { + this.datanodesSoftwareVersions.put(version, count-1); + } else { + this.datanodesSoftwareVersions.remove(version); + } + } + } + } + + private boolean shouldCountVersion(DatanodeDescriptor node) { + return node.getSoftwareVersion() != null && node.isAlive && + !isDatanodeDead(node); + } + + private void countSoftwareVersions() { + synchronized(datanodeMap) { + HashMap versionCount = new HashMap(); + for(DatanodeDescriptor dn: datanodeMap.values()) { + // Check isAlive too because right after removeDatanode(), + // isDatanodeDead() is still true + if(shouldCountVersion(dn)) + { + Integer num = versionCount.get(dn.getSoftwareVersion()); + num = num == null ? 1 : num+1; + versionCount.put(dn.getSoftwareVersion(), num); + } + } + this.datanodesSoftwareVersions = versionCount; + } + } + + public HashMap getDatanodesSoftwareVersions() { + synchronized(datanodeMap) { + return new HashMap (this.datanodesSoftwareVersions); + } + } + /* Resolve a node's network location */ private String resolveNetworkLocation (DatanodeID node) { List names = new ArrayList(1); @@ -755,21 +819,28 @@ public class DatanodeManager { try { // update cluster map getNetworkTopology().remove(nodeS); + if(shouldCountVersion(nodeS)) { + decrementVersionCount(nodeS.getSoftwareVersion()); + } nodeS.updateRegInfo(nodeReg); + + nodeS.setSoftwareVersion(nodeReg.getSoftwareVersion()); nodeS.setDisallowed(false); // Node is in the include list - + // resolve network location nodeS.setNetworkLocation(resolveNetworkLocation(nodeS)); getNetworkTopology().add(nodeS); // also treat the registration message as a heartbeat heartbeatManager.register(nodeS); + incrementVersionCount(nodeS.getSoftwareVersion()); checkDecommissioning(nodeS); success = true; } finally { if (!success) { removeDatanode(nodeS); wipeDatanode(nodeS); + countSoftwareVersions(); } } return; @@ -793,6 +864,7 @@ public class DatanodeManager { try { nodeDescr.setNetworkLocation(resolveNetworkLocation(nodeDescr)); networktopology.add(nodeDescr); + nodeDescr.setSoftwareVersion(nodeReg.getSoftwareVersion()); // register new datanode addDatanode(nodeDescr); @@ -803,10 +875,12 @@ public class DatanodeManager { // because its is done when the descriptor is created heartbeatManager.addDatanode(nodeDescr); success = true; + incrementVersionCount(nodeReg.getSoftwareVersion()); } finally { if (!success) { removeDatanode(nodeDescr); wipeDatanode(nodeDescr); + countSoftwareVersions(); } } } catch (InvalidTopologyException e) { @@ -828,6 +902,7 @@ public class DatanodeManager { namesystem.writeLock(); try { refreshDatanodes(); + countSoftwareVersions(); } finally { namesystem.writeUnlock(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java index 0f0a989f8c9..e158d9442d1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java @@ -359,6 +359,7 @@ class ClusterJspHelper { nn.httpAddress = httpAddress; 
getLiveNodeCount(getProperty(props, "LiveNodes").getValueAsText(), nn); getDeadNodeCount(getProperty(props, "DeadNodes").getValueAsText(), nn); + nn.softwareVersion = getProperty(props, "SoftwareVersion").getTextValue(); return nn; } @@ -596,6 +597,7 @@ class ClusterJspHelper { toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " (" + nn.deadDecomCount + ")", nn.httpAddress+"/dfsnodelist.jsp?whatNodes=DEAD" , "Dead Datanode (Decommissioned)"); + toXmlItemBlock(doc, "Software Version", nn.softwareVersion); doc.endTag(); // node } doc.endTag(); // namenodes @@ -624,6 +626,7 @@ class ClusterJspHelper { int deadDatanodeCount = 0; int deadDecomCount = 0; String httpAddress = null; + String softwareVersion = ""; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 030893028c4..569f2165c7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6225,6 +6225,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, innerinfo.put("nonDfsUsedSpace", node.getNonDfsUsed()); innerinfo.put("capacity", node.getCapacity()); innerinfo.put("numBlocks", node.numBlocks()); + innerinfo.put("version", node.getSoftwareVersion()); info.put(node.getHostName(), innerinfo); } return JSON.toString(info); @@ -6436,6 +6437,22 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return JSON.toString(list); } + @Override //NameNodeMXBean + public int getDistinctVersionCount() { + return blockManager.getDatanodeManager().getDatanodesSoftwareVersions() + .size(); + } + + @Override //NameNodeMXBean + public Map getDistinctVersions() { + return blockManager.getDatanodeManager().getDatanodesSoftwareVersions(); + } + + @Override //NameNodeMXBean + public String getSoftwareVersion() { + return VersionInfo.getVersion(); + } + /** * Verifies that the given identifier and password are valid and match. * @param identifier Token identifier. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java index 173d5aea4c7..ff2e3ea10dd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeMXBean.java @@ -17,6 +17,8 @@ */ package org.apache.hadoop.hdfs.server.namenode; +import java.util.Map; + import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -33,7 +35,13 @@ public interface NameNodeMXBean { * @return the version */ public String getVersion(); - + + /** + * Get the version of software running on the Namenode + * @return a string representing the version + */ + public String getSoftwareVersion(); + /** * Gets the used space by data nodes. * @@ -215,4 +223,19 @@ public interface NameNodeMXBean { * @return the list of corrupt files, as a JSON string. 
*/ public String getCorruptFiles(); + + /** + * Get the number of distinct versions of live datanodes + * + * @return the number of distinct versions of live datanodes + */ + public int getDistinctVersionCount(); + + /** + * Get the number of live datanodes for each distinct versions + * + * @return the number of live datanodes for each distinct versions + */ + public Map getDistinctVersions(); + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 1989784ba53..36163c7d0bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -32,6 +32,7 @@ import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; +import java.util.Map; import javax.servlet.ServletContext; import javax.servlet.http.HttpServletRequest; @@ -99,6 +100,20 @@ class NamenodeJspHelper { } } + static String getRollingUpgradeText(FSNamesystem fsn) { + DatanodeManager dm = fsn.getBlockManager().getDatanodeManager(); + Map list = dm.getDatanodesSoftwareVersions(); + if(list.size() > 1) { + StringBuffer status = new StringBuffer("Rolling upgrades in progress. " + + "There are " + list.size() + " versions of datanodes currently live: "); + for(Map.Entry ver: list.entrySet()) { + status.append(ver.getKey() + "(" + ver.getValue() + "), "); + } + return status.substring(0, status.length()-2); + } + return ""; + } + static String getInodeLimitText(FSNamesystem fsn) { if (fsn == null) { return ""; @@ -802,7 +817,9 @@ class NamenodeJspHelper { + "" + percentBpUsed + "" - + d.getVolumeFailures() + "\n"); + + d.getVolumeFailures() + + "" + + d.getSoftwareVersion() + "\n"); } void generateNodesList(ServletContext context, JspWriter out, @@ -900,7 +917,9 @@ class NamenodeJspHelper { + nodeHeaderStr("pcbpused") + "> Block Pool
Used (%)" + " Failed Volumes\n"); + +"> Failed Volumes Version\n"); JspHelper.sortNodeList(live, sorterField, sorterOrder); for (int i = 0; i < live.size(); i++) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp index 1201b95ff75..0b0091e10d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.jsp @@ -65,6 +65,7 @@
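For illustration, if the datanode version map built above contained, say, {2.1.0-beta=5, 2.0.5-alpha=2}, then getRollingUpgradeText(fsn) would produce a line of the form shown below (map iteration order may vary); with only a single datanode version live it returns an empty string and nothing extra is rendered on the page.

    Rolling upgrades in progress. There are 2 versions of datanodes currently live: 2.1.0-beta(5), 2.0.5-alpha(2)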

Cluster Summary

<%= NamenodeJspHelper.getSecurityModeText()%> <%= NamenodeJspHelper.getSafeModeText(fsn)%> + <%= NamenodeJspHelper.getRollingUpgradeText(fsn)%> <%= NamenodeJspHelper.getInodeLimitText(fsn)%> <%= NamenodeJspHelper.getCorruptFilesWarning(fsn)%> From 10a62366a57e2c7f7ee4d47e83b60fb5a5b71200 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Tue, 27 Aug 2013 20:53:31 +0000 Subject: [PATCH 076/153] HDFS-5128. Allow multiple net interfaces to be used with HA namenode RPC server. Contributed by Kihwal Lee. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517981 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 + .../hadoop/hdfs/server/namenode/NameNode.java | 24 +++++++ .../server/namenode/NameNodeRpcServer.java | 33 ++++++++-- .../src/main/resources/hdfs-default.xml | 24 +++++++ .../namenode/TestNameNodeRpcServer.java | 63 +++++++++++++++++++ 6 files changed, 145 insertions(+), 4 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index bb937935bdd..0a68720d607 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -336,6 +336,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-3245. Add metrics and web UI for cluster version summary. (Ravi Prakash via kihwal) + HDFS-5128. Allow multiple net interfaces to be used with HA namenode RPC + server. (kihwal) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index 91622514da7..a66ec939613 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -104,7 +104,9 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_NAMENODE_HTTP_ADDRESS_KEY = "dfs.namenode.http-address"; public static final String DFS_NAMENODE_HTTP_ADDRESS_DEFAULT = "0.0.0.0:" + DFS_NAMENODE_HTTP_PORT_DEFAULT; public static final String DFS_NAMENODE_RPC_ADDRESS_KEY = "dfs.namenode.rpc-address"; + public static final String DFS_NAMENODE_RPC_BIND_HOST_KEY = "dfs.namenode.rpc-bind-host"; public static final String DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY = "dfs.namenode.servicerpc-address"; + public static final String DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY = "dfs.namenode.servicerpc-bind-host"; public static final String DFS_NAMENODE_MAX_OBJECTS_KEY = "dfs.namenode.max.objects"; public static final long DFS_NAMENODE_MAX_OBJECTS_DEFAULT = 0; public static final String DFS_NAMENODE_SAFEMODE_EXTENSION_KEY = "dfs.namenode.safemode.extension"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index b8a51390c11..d2ef66974dc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -166,12 +166,14 @@ public class NameNode implements NameNodeStatusMXBean { */ public static final String[] 
NAMENODE_SPECIFIC_KEYS = { DFS_NAMENODE_RPC_ADDRESS_KEY, + DFS_NAMENODE_RPC_BIND_HOST_KEY, DFS_NAMENODE_NAME_DIR_KEY, DFS_NAMENODE_EDITS_DIR_KEY, DFS_NAMENODE_SHARED_EDITS_DIR_KEY, DFS_NAMENODE_CHECKPOINT_DIR_KEY, DFS_NAMENODE_CHECKPOINT_EDITS_DIR_KEY, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, + DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY, DFS_NAMENODE_HTTP_ADDRESS_KEY, DFS_NAMENODE_KEYTAB_FILE_KEY, DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, @@ -387,6 +389,28 @@ public class NameNode implements NameNodeStatusMXBean { return getAddress(conf); } + /** Given a configuration get the bind host of the service rpc server + * If the bind host is not configured returns null. + */ + protected String getServiceRpcServerBindHost(Configuration conf) { + String addr = conf.getTrimmed(DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY); + if (addr == null || addr.isEmpty()) { + return null; + } + return addr; + } + + /** Given a configuration get the bind host of the client rpc server + * If the bind host is not configured returns null. + */ + protected String getRpcServerBindHost(Configuration conf) { + String addr = conf.getTrimmed(DFS_NAMENODE_RPC_BIND_HOST_KEY); + if (addr == null || addr.isEmpty()) { + return null; + } + return addr; + } + /** * Modifies the configuration passed to contain the service rpc address setting */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java index 0a3309fdbb4..2d729e69e1f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java @@ -136,6 +136,7 @@ import org.apache.hadoop.tools.protocolPB.GetUserMappingsProtocolServerSideTrans import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.util.VersionUtil; +import com.google.common.annotations.VisibleForTesting; import com.google.protobuf.BlockingService; /** @@ -218,6 +219,13 @@ class NameNodeRpcServer implements NamenodeProtocols { InetSocketAddress serviceRpcAddr = nn.getServiceRpcServerAddress(conf); if (serviceRpcAddr != null) { + String bindHost = nn.getServiceRpcServerBindHost(conf); + if (bindHost == null) { + bindHost = serviceRpcAddr.getHostName(); + } + LOG.info("Service RPC server is binding to " + bindHost + ":" + + serviceRpcAddr.getPort()); + int serviceHandlerCount = conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY, DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT); @@ -225,7 +233,7 @@ class NameNodeRpcServer implements NamenodeProtocols { .setProtocol( org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class) .setInstance(clientNNPbService) - .setBindAddress(serviceRpcAddr.getHostName()) + .setBindAddress(bindHost) .setPort(serviceRpcAddr.getPort()) .setNumHandlers(serviceHandlerCount) .setVerbose(false) @@ -246,7 +254,10 @@ class NameNodeRpcServer implements NamenodeProtocols { DFSUtil.addPBProtocol(conf, GetUserMappingsProtocolPB.class, getUserMappingService, serviceRpcServer); - serviceRPCAddress = serviceRpcServer.getListenerAddress(); + // Update the address with the correct port + InetSocketAddress listenAddr = serviceRpcServer.getListenerAddress(); + serviceRPCAddress = new InetSocketAddress( + serviceRpcAddr.getHostName(), listenAddr.getPort()); nn.setRpcServiceServerAddress(conf, serviceRPCAddress); } else { serviceRpcServer = null; @@ -254,11 +265,17 @@ 
class NameNodeRpcServer implements NamenodeProtocols { } InetSocketAddress rpcAddr = nn.getRpcServerAddress(conf); + String bindHost = nn.getRpcServerBindHost(conf); + if (bindHost == null) { + bindHost = rpcAddr.getHostName(); + } + LOG.info("RPC server is binding to " + bindHost + ":" + rpcAddr.getPort()); + clientRpcServer = new RPC.Builder(conf) .setProtocol( org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolPB.class) .setInstance(clientNNPbService) - .setBindAddress(rpcAddr.getHostName()) + .setBindAddress(bindHost) .setPort(rpcAddr.getPort()) .setNumHandlers(handlerCount) .setVerbose(false) @@ -290,7 +307,9 @@ class NameNodeRpcServer implements NamenodeProtocols { } // The rpc-server port can be ephemeral... ensure we have the correct info - clientRpcAddress = clientRpcServer.getListenerAddress(); + InetSocketAddress listenAddr = clientRpcServer.getListenerAddress(); + clientRpcAddress = new InetSocketAddress( + rpcAddr.getHostName(), listenAddr.getPort()); nn.setRpcServerAddress(conf, clientRpcAddress); minimumDataNodeVersion = conf.get( @@ -314,6 +333,12 @@ class NameNodeRpcServer implements NamenodeProtocols { NSQuotaExceededException.class, DSQuotaExceededException.class); } + + /** Allow access to the client RPC server for testing */ + @VisibleForTesting + RPC.Server getClientRpcServer() { + return clientRpcServer; + } /** * Start client and service RPC servers. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml index 06eca701264..f25245e36f7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml @@ -51,6 +51,18 @@ + + dfs.namenode.rpc-bind-host + + + The actual address the server will bind to. If this optional address is + set, the RPC server will bind to this address and the port specified in + dfs.namenode.rpc-address for the RPC server. It can also be specified + per name node or name service for HA/Federation. This is most useful for + making name node listen to all interfaces by setting to 0.0.0.0. + + + dfs.namenode.servicerpc-address @@ -64,6 +76,18 @@ + + dfs.namenode.servicerpc-bind-host + + + The actual address the server will bind to. If this optional address is + set, the service RPC server will bind to this address and the port + specified in dfs.namenode.servicerpc-address. It can also be specified + per name node or name service for HA/Federation. This is most useful for + making name node listen to all interfaces by setting to 0.0.0.0. + + + dfs.namenode.secondary.http-address 0.0.0.0:50090 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java new file mode 100644 index 00000000000..ada93e84f0e --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRpcServer.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* + * Test the MiniDFSCluster functionality that allows "dfs.datanode.address", + * "dfs.datanode.http.address", and "dfs.datanode.ipc.address" to be + * configurable. The MiniDFSCluster.startDataNodes() API now has a parameter + * that will check these properties if told to do so. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_BIND_HOST_KEY; +import static org.junit.Assert.assertEquals; + +import java.io.IOException; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; + +import org.junit.Test; + +public class TestNameNodeRpcServer { + + @Test + public void testNamenodeRpcBindAny() throws IOException { + Configuration conf = new HdfsConfiguration(); + + // The name node in MiniDFSCluster only binds to 127.0.0.1. + // We can set the bind address to 0.0.0.0 to make it listen + // to all interfaces. + conf.set(DFS_NAMENODE_RPC_BIND_HOST_KEY, "0.0.0.0"); + MiniDFSCluster cluster = null; + + try { + cluster = new MiniDFSCluster.Builder(conf).build(); + cluster.waitActive(); + assertEquals("0.0.0.0", ((NameNodeRpcServer)cluster.getNameNodeRpc()) + .getClientRpcServer().getListenerAddress().getHostName()); + } finally { + if (cluster != null) { + cluster.shutdown(); + } + // Reset the config + conf.unset(DFS_NAMENODE_RPC_BIND_HOST_KEY); + } + } +} + From 1bfcab9689588c6add9dcf1caad59ad68a3d1866 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Tue, 27 Aug 2013 21:04:24 +0000 Subject: [PATCH 077/153] HDFS-5132. Deadlock in NameNode between SafeModeMonitor#run and DatanodeManager#handleHeartbeat. Contributed by Kihwal Lee. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517989 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/namenode/FSNamesystem.java | 19 +++++++++++++++---- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0a68720d607..48ebda3437d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -395,6 +395,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5124. DelegationTokenSecretManager#retrievePassword can cause deadlock in NameNode. (Daryn Sharp via jing9) + HDFS-5132. Deadlock in NameNode between SafeModeMonitor#run and + DatanodeManager#handleHeartbeat. 
(kihwal) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 569f2165c7a..178f3d9b087 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -4798,7 +4798,21 @@ public class FSNamesystem implements Namesystem, FSClusterStats, */ @Override public void run() { - while (fsRunning && (safeMode != null && !safeMode.canLeave())) { + while (fsRunning) { + writeLock(); + try { + if (safeMode == null) { // Not in safe mode. + break; + } + if (safeMode.canLeave()) { + // Leave safe mode. + safeMode.leave(); + break; + } + } finally { + writeUnlock(); + } + try { Thread.sleep(recheckInterval); } catch (InterruptedException ie) { @@ -4807,9 +4821,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } if (!fsRunning) { LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread"); - } else { - // leave safe mode and stop the monitor - leaveSafeMode(); } smmthread = null; } From dfaa2e305d29d6ed714759d65c99836cdb847835 Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Tue, 27 Aug 2013 21:14:12 +0000 Subject: [PATCH 078/153] Adding the new test file for HDFS-3245 that was accidentally dropped git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1517994 13f79535-47bb-0310-9956-ffa450edef68 --- .../blockmanagement/TestDatanodeManager.java | 148 ++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java new file mode 100644 index 00000000000..75034932acd --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestDatanodeManager.java @@ -0,0 +1,148 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hdfs.server.blockmanagement; + +import java.io.IOException; +import java.util.HashMap; +import java.util.Iterator; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Random; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.junit.Test; +import org.mockito.Mockito; +import org.mortbay.log.Log; + +import static org.junit.Assert.*; + +public class TestDatanodeManager { + + //The number of times the registration / removal of nodes should happen + final int NUM_ITERATIONS = 500; + + /** + * This test sends a random sequence of node registrations and node removals + * to the DatanodeManager (of nodes with different IDs and versions), and + * checks that the DatanodeManager keeps a correct count of different software + * versions at all times. + */ + @Test + public void testNumVersionsReportedCorrect() throws IOException { + //Create the DatanodeManager which will be tested + FSNamesystem fsn = Mockito.mock(FSNamesystem.class); + Mockito.when(fsn.hasWriteLock()).thenReturn(true); + DatanodeManager dm = new DatanodeManager(Mockito.mock(BlockManager.class), + fsn, new Configuration()); + + //Seed the RNG with a known value so test failures are easier to reproduce + Random rng = new Random(); + int seed = rng.nextInt(); + rng = new Random(seed); + Log.info("Using seed " + seed + " for testing"); + + //A map of the Storage IDs to the DN registration it was registered with + HashMap sIdToDnReg = + new HashMap(); + + for(int i=0; i> it = + sIdToDnReg.entrySet().iterator(); + for(int j=0; j mapToCheck = dm.getDatanodesSoftwareVersions(); + + //Remove counts from versions and make sure that after removing all nodes + //mapToCheck is empty + for(Entry it: sIdToDnReg.entrySet()) { + String ver = it.getValue().getSoftwareVersion(); + if(!mapToCheck.containsKey(ver)) { + throw new AssertionError("The correct number of datanodes of a " + + "version was not found on iteration " + i); + } + mapToCheck.put(ver, mapToCheck.get(ver) - 1); + if(mapToCheck.get(ver) == 0) { + mapToCheck.remove(ver); + } + } + for(Entry entry: mapToCheck.entrySet()) { + Log.info("Still in map: " + entry.getKey() + " has " + + entry.getValue()); + } + assertEquals("The map of version counts returned by DatanodeManager was" + + " not what it was expected to be on iteration " + i, 0, + mapToCheck.size()); + } + } + +} From 914a0e51729ffe9816d007fc08640c44cc6bc9ee Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Tue, 27 Aug 2013 23:36:42 +0000 Subject: [PATCH 079/153] YARN-981. Fixed YARN webapp so that /logs servlet works like before. Contributed by Jian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518030 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 140e97ca133..ceee8141a0a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -105,6 +105,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1008. MiniYARNCluster with multiple nodemanagers, all nodes have same key for allocations. (tucu) + YARN-981. Fixed YARN webapp so that /logs servlet works like before. 
(Jian He + via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 1a093a39cc6..622a9d8c239 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -242,7 +242,10 @@ public class WebApps { for(Map.Entry entry : attributes.entrySet()) { server.setAttribute(entry.getKey(), entry.getValue()); } - server.addGlobalFilter("guice", GuiceFilter.class.getName(), null); + String webAppPath = "/" + name + "/*"; + server.defineFilter(server.getWebAppContext(), "guice", + GuiceFilter.class.getName(), null, new String[] { webAppPath, "/" }); + webapp.setConf(conf); webapp.setHttpServer(server); server.start(); From ca5de53bc62b3d2adfc139f8a98cf44b5e05cbf8 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Tue, 27 Aug 2013 23:54:02 +0000 Subject: [PATCH 080/153] YARN-1083. Changed ResourceManager to fail when the expiry interval is less than the configured node-heartbeat interval. Contributed by Zhijie Shen. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518036 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../server/resourcemanager/ResourceManager.java | 14 ++++++++++++++ .../resourcemanager/TestResourceManager.java | 17 +++++++++++++++++ 3 files changed, 34 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ceee8141a0a..6aba407e6b2 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -54,6 +54,9 @@ Release 2.1.1-beta - UNRELEASED YARN-942. In Fair Scheduler documentation, inconsistency on which properties have prefix (Akira Ajisaka via Sandy Ryza) + YARN-1083. Changed ResourceManager to fail when the expiry interval is less + than the configured node-heartbeat interval. 
(Zhijie Shen via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 95e3207e4b2..6ca5307bc53 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -365,6 +365,20 @@ public class ResourceManager extends CompositeService implements Recoverable { + ", " + YarnConfiguration.RM_AM_MAX_ATTEMPTS + "=" + globalMaxAppAttempts + ", it should be a positive integer."); } + + // validate expireIntvl >= heartbeatIntvl + long expireIntvl = conf.getLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, + YarnConfiguration.DEFAULT_RM_NM_EXPIRY_INTERVAL_MS); + long heartbeatIntvl = + conf.getLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, + YarnConfiguration.DEFAULT_RM_NM_HEARTBEAT_INTERVAL_MS); + if (expireIntvl < heartbeatIntvl) { + throw new YarnRuntimeException("Nodemanager expiry interval should be no" + + " less than heartbeat interval, " + + YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS + "=" + expireIntvl + + ", " + YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS + "=" + + heartbeatIntvl); + } } @Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java index b6a1356b79c..9e427a46517 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java @@ -203,4 +203,21 @@ public class TestResourceManager { } } + @Test + public void testNMExpiryAndHeartbeatIntervalsValidation() throws Exception { + Configuration conf = new YarnConfiguration(); + conf.setLong(YarnConfiguration.RM_NM_EXPIRY_INTERVAL_MS, 1000); + conf.setLong(YarnConfiguration.RM_NM_HEARTBEAT_INTERVAL_MS, 1001); + resourceManager = new ResourceManager();; + try { + resourceManager.init(conf); + } catch (YarnRuntimeException e) { + // Exception is expected. + if (!e.getMessage().startsWith("Nodemanager expiry interval should be no" + + " less than heartbeat interval")) { + throw e; + } + } + } + } From 4047ad72cffa03ba8383f4448a0f921a3d4fcdf2 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Wed, 28 Aug 2013 05:13:21 +0000 Subject: [PATCH 081/153] YARN-602. Fixed NodeManager to not let users override some mandatory environmental variables. Contributed by Kenji Kikushima. 
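Illustrative sketch, not part of the patch: YARN-602 means the NodeManager now unconditionally overwrites USER, LOGNAME, HOME and PWD when it builds a container's environment, so values a client sets for those keys in its ContainerLaunchContext never reach the launched process. The enum constants below are the ApplicationConstants.Environment names used in the ContainerLaunch diff that follows; the class name and the literal values are hypothetical.

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;

public class EnvOverrideSketch {
  // Builds the kind of user-supplied environment the test in this patch submits.
  public static Map<String, String> userEnv() {
    Map<String, String> env = new HashMap<String, String>();
    env.put(Environment.USER.name(), "user_set_user");       // overwritten with the real container user
    env.put(Environment.LOGNAME.name(), "user_set_logname"); // overwritten with the real container user
    env.put(Environment.HOME.name(), "/user/set/home");      // overwritten with YarnConfiguration.NM_USER_HOME_DIR
    env.put(Environment.PWD.name(), "/user/set/pwd");        // overwritten with the container's working directory
    return env;
  }
}

The TestContainerLaunch changes later in this patch assert exactly this behaviour: whatever the client put in the launch context for these keys, the values echoed from inside the container are the NodeManager-supplied ones.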
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518077 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../launcher/ContainerLaunch.java | 12 ++-- .../launcher/TestContainerLaunch.java | 59 ++++++++++++++++++- 3 files changed, 65 insertions(+), 9 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 6aba407e6b2..556d5163e06 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -111,6 +111,9 @@ Release 2.1.1-beta - UNRELEASED YARN-981. Fixed YARN webapp so that /logs servlet works like before. (Jian He via vinodkv) + YARN-602. Fixed NodeManager to not let users override some mandatory + environmental variables. (Kenji Kikushima via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index f4578dc83d8..89812d27efc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -588,20 +588,18 @@ public class ContainerLaunch implements Callable { environment.put(Environment.LOG_DIRS.name(), StringUtils.join(",", containerLogDirs)); - putEnvIfNotNull(environment, Environment.USER.name(), container.getUser()); + environment.put(Environment.USER.name(), container.getUser()); - putEnvIfNotNull(environment, - Environment.LOGNAME.name(),container.getUser()); - - putEnvIfNotNull(environment, - Environment.HOME.name(), + environment.put(Environment.LOGNAME.name(), container.getUser()); + + environment.put(Environment.HOME.name(), conf.get( YarnConfiguration.NM_USER_HOME_DIR, YarnConfiguration.DEFAULT_NM_USER_HOME_DIR ) ); - putEnvIfNotNull(environment, Environment.PWD.name(), pwd.toString()); + environment.put(Environment.PWD.name(), pwd.toString()); putEnvIfNotNull(environment, Environment.HADOOP_CONF_DIR.name(), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index 78c78c0d605..d25c1c4039b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -346,7 +346,6 @@ public class TestContainerLaunch extends BaseContainerManagerTest { ApplicationAttemptId appAttemptId = ApplicationAttemptId.newInstance(appId, 1); - int port = 12345; ContainerId cId = ContainerId.newInstance(appAttemptId, 0); Map 
userSetEnv = new HashMap(); userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id"); @@ -354,6 +353,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest { userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT"); userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT"); userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR"); + userSetEnv.put(Environment.USER.key(), "user_set_" + + Environment.USER.key()); + userSetEnv.put(Environment.LOGNAME.name(), "user_set_LOGNAME"); + userSetEnv.put(Environment.PWD.name(), "user_set_PWD"); + userSetEnv.put(Environment.HOME.name(), "user_set_HOME"); containerLaunchContext.setEnvironment(userSetEnv); File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile"); @@ -371,6 +375,14 @@ public class TestContainerLaunch extends BaseContainerManagerTest { + processStartFile); fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> " + processStartFile); + fileWriter.println("@echo " + Environment.USER.$() + ">> " + + processStartFile); + fileWriter.println("@echo " + Environment.LOGNAME.$() + ">> " + + processStartFile); + fileWriter.println("@echo " + Environment.PWD.$() + ">> " + + processStartFile); + fileWriter.println("@echo " + Environment.HOME.$() + ">> " + + processStartFile); fileWriter.println("@echo " + cId + ">> " + processStartFile); fileWriter.println("@ping -n 100 127.0.0.1 >nul"); } else { @@ -385,6 +397,14 @@ public class TestContainerLaunch extends BaseContainerManagerTest { + processStartFile); fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> " + processStartFile); + fileWriter.write("\necho $" + Environment.USER.name() + " >> " + + processStartFile); + fileWriter.write("\necho $" + Environment.LOGNAME.name() + " >> " + + processStartFile); + fileWriter.write("\necho $" + Environment.PWD.name() + " >> " + + processStartFile); + fileWriter.write("\necho $" + Environment.HOME.name() + " >> " + + processStartFile); fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nexec sleep 100"); } @@ -452,6 +472,22 @@ public class TestContainerLaunch extends BaseContainerManagerTest { reader.readLine()); Assert.assertEquals(String.valueOf(HTTP_PORT), reader.readLine()); Assert.assertEquals(StringUtils.join(",", appDirs), reader.readLine()); + Assert.assertEquals(user, reader.readLine()); + Assert.assertEquals(user, reader.readLine()); + String obtainedPWD = reader.readLine(); + boolean found = false; + for (Path localDir : appDirs) { + if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) { + found = true; + break; + } + } + Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found); + Assert.assertEquals( + conf.get( + YarnConfiguration.NM_USER_HOME_DIR, + YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), + reader.readLine()); Assert.assertEquals(cId.toString(), containerLaunchContext .getEnvironment().get(Environment.CONTAINER_ID.name())); @@ -465,6 +501,26 @@ public class TestContainerLaunch extends BaseContainerManagerTest { .getEnvironment().get(Environment.LOCAL_DIRS.name())); Assert.assertEquals(StringUtils.join(",", containerLogDirs), containerLaunchContext.getEnvironment().get(Environment.LOG_DIRS.name())); + Assert.assertEquals(user, containerLaunchContext.getEnvironment() + .get(Environment.USER.name())); + Assert.assertEquals(user, containerLaunchContext.getEnvironment() + .get(Environment.LOGNAME.name())); + found = false; + obtainedPWD = + 
containerLaunchContext.getEnvironment().get(Environment.PWD.name()); + for (Path localDir : appDirs) { + if (new Path(localDir, cId.toString()).toString().equals(obtainedPWD)) { + found = true; + break; + } + } + Assert.assertTrue("Wrong local-dir found : " + obtainedPWD, found); + Assert.assertEquals( + conf.get( + YarnConfiguration.NM_USER_HOME_DIR, + YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), + containerLaunchContext.getEnvironment() + .get(Environment.HOME.name())); // Get the pid of the process String pid = reader.readLine().trim(); @@ -538,7 +594,6 @@ public class TestContainerLaunch extends BaseContainerManagerTest { ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); - int port = 12345; // upload the script file so that the container can run it URL resource_alpha = From 53f559dd797e1bf3bd144a5721615eafc055e005 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Wed, 28 Aug 2013 05:46:09 +0000 Subject: [PATCH 082/153] YARN-1081. Made a trivial change to YARN node CLI header to avoid potential confusion. Contributed by Akira AJISAKA. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518080 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../hadoop/yarn/client/cli/NodeCLI.java | 4 +- .../hadoop/yarn/client/cli/TestYarnCLI.java | 92 +++++++++---------- 3 files changed, 51 insertions(+), 48 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 556d5163e06..2945b77817f 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -57,6 +57,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1083. Changed ResourceManager to fail when the expiry interval is less than the configured node-heartbeat interval. (Zhijie Shen via vinodkv) + YARN-1081. Made a trivial change to YARN node CLI header to avoid potential + confusion. 
(Akira AJISAKA via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java index afe3287b5f1..c62b4d40598 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java @@ -43,7 +43,7 @@ import org.apache.hadoop.yarn.util.ConverterUtils; @Private @Unstable public class NodeCLI extends YarnCLI { - private static final String NODES_PATTERN = "%16s\t%15s\t%17s\t%18s" + + private static final String NODES_PATTERN = "%16s\t%15s\t%17s\t%28s" + System.getProperty("line.separator"); private static final String NODE_STATE_CMD = "states"; @@ -133,7 +133,7 @@ public class NodeCLI extends YarnCLI { nodeStates.toArray(new NodeState[0])); writer.println("Total Nodes:" + nodesReport.size()); writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address", - "Running-Containers"); + "Number-of-Running-Containers"); for (NodeReport nodeReport : nodesReport) { writer.printf(NODES_PATTERN, nodeReport.getNodeId(), nodeReport .getNodeState(), nodeReport.getHttpAddress(), nodeReport diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 6d53108f95a..c6b49465e1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -543,9 +543,9 @@ public class TestYarnCLI { PrintWriter pw = new PrintWriter(baos); pw.println("Total Nodes:1"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); - pw.println("Running-Containers"); - pw.print(" host0:0\t NEW\t host1:8888"); - pw.println("\t 0"); + pw.println("Number-of-Running-Containers"); + pw.print(" host0:0\t NEW\t host1:8888\t"); + pw.println(" 0"); pw.close(); String nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); @@ -564,11 +564,11 @@ public class TestYarnCLI { pw = new PrintWriter(baos); pw.println("Total Nodes:2"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); - pw.println("Running-Containers"); - pw.print(" host0:0\t RUNNING\t host1:8888"); - pw.println("\t 0"); - pw.print(" host1:0\t RUNNING\t host1:8888"); - pw.println("\t 0"); + pw.println("Number-of-Running-Containers"); + pw.print(" host0:0\t RUNNING\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host1:0\t RUNNING\t host1:8888\t"); + pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); @@ -593,9 +593,9 @@ public class TestYarnCLI { pw = new PrintWriter(baos); pw.println("Total Nodes:1"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); - pw.println("Running-Containers"); - pw.print(" host0:0\t UNHEALTHY\t host1:8888"); - pw.println("\t 0"); + pw.println("Number-of-Running-Containers"); + pw.print(" host0:0\t UNHEALTHY\t host1:8888\t"); + pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); 
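Illustrative aside, not part of the patch: a tiny standalone sketch of what the widened NodeCLI format string prints after YARN-1081. The column widths and the new header text come from the NodeCLI diff above; the class name and the sample node row are invented, and %n stands in for the line separator the real code appends to NODES_PATTERN.

import java.io.PrintWriter;

public class NodeHeaderSketch {
  // Last column grew from %18s to %28s so "Number-of-Running-Containers" fits.
  private static final String NODES_PATTERN = "%16s\t%15s\t%17s\t%28s%n";

  public static void main(String[] args) {
    PrintWriter writer = new PrintWriter(System.out, true);
    writer.printf(NODES_PATTERN, "Node-Id", "Node-State", "Node-Http-Address",
        "Number-of-Running-Containers");
    writer.printf(NODES_PATTERN, "host0:0", "RUNNING", "host1:8888", 0); // invented sample row
    writer.flush();
  }
}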
@@ -614,9 +614,9 @@ public class TestYarnCLI { pw = new PrintWriter(baos); pw.println("Total Nodes:1"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); - pw.println("Running-Containers"); - pw.print(" host0:0\t DECOMMISSIONED\t host1:8888"); - pw.println("\t 0"); + pw.println("Number-of-Running-Containers"); + pw.print(" host0:0\t DECOMMISSIONED\t host1:8888\t"); + pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); @@ -635,9 +635,9 @@ public class TestYarnCLI { pw = new PrintWriter(baos); pw.println("Total Nodes:1"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); - pw.println("Running-Containers"); - pw.print(" host0:0\t REBOOTED\t host1:8888"); - pw.println("\t 0"); + pw.println("Number-of-Running-Containers"); + pw.print(" host0:0\t REBOOTED\t host1:8888\t"); + pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); @@ -656,9 +656,9 @@ public class TestYarnCLI { pw = new PrintWriter(baos); pw.println("Total Nodes:1"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); - pw.println("Running-Containers"); - pw.print(" host0:0\t LOST\t host1:8888"); - pw.println("\t 0"); + pw.println("Number-of-Running-Containers"); + pw.print(" host0:0\t LOST\t host1:8888\t"); + pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); @@ -681,17 +681,17 @@ public class TestYarnCLI { pw = new PrintWriter(baos); pw.println("Total Nodes:5"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); - pw.println("Running-Containers"); - pw.print(" host0:0\t NEW\t host1:8888"); - pw.println("\t 0"); - pw.print(" host0:0\t RUNNING\t host1:8888"); - pw.println("\t 0"); - pw.print(" host1:0\t RUNNING\t host1:8888"); - pw.println("\t 0"); - pw.print(" host0:0\t REBOOTED\t host1:8888"); - pw.println("\t 0"); - pw.print(" host0:0\t LOST\t host1:8888"); - pw.println("\t 0"); + pw.println("Number-of-Running-Containers"); + pw.print(" host0:0\t NEW\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host0:0\t RUNNING\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host1:0\t RUNNING\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host0:0\t REBOOTED\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host0:0\t LOST\t host1:8888\t"); + pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); @@ -712,21 +712,21 @@ public class TestYarnCLI { pw = new PrintWriter(baos); pw.println("Total Nodes:7"); pw.print(" Node-Id\t Node-State\tNode-Http-Address\t"); - pw.println("Running-Containers"); - pw.print(" host0:0\t NEW\t host1:8888"); - pw.println("\t 0"); - pw.print(" host0:0\t RUNNING\t host1:8888"); - pw.println("\t 0"); - pw.print(" host1:0\t RUNNING\t host1:8888"); - pw.println("\t 0"); - pw.print(" host0:0\t UNHEALTHY\t host1:8888"); - pw.println("\t 0"); - pw.print(" host0:0\t DECOMMISSIONED\t host1:8888"); - pw.println("\t 0"); - pw.print(" host0:0\t REBOOTED\t host1:8888"); - pw.println("\t 0"); - pw.print(" host0:0\t LOST\t host1:8888"); - pw.println("\t 0"); + pw.println("Number-of-Running-Containers"); + pw.print(" host0:0\t NEW\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host0:0\t RUNNING\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host1:0\t RUNNING\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host0:0\t UNHEALTHY\t host1:8888\t"); + pw.println(" 0"); + pw.print(" 
host0:0\t DECOMMISSIONED\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host0:0\t REBOOTED\t host1:8888\t"); + pw.println(" 0"); + pw.print(" host0:0\t LOST\t host1:8888\t"); + pw.println(" 0"); pw.close(); nodesReportStr = baos.toString("UTF-8"); Assert.assertEquals(nodesReportStr, sysOutStream.toString()); From 30b8ef91a32ddf1fe3756bae6d7dc538a150bdc4 Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Wed, 28 Aug 2013 17:23:55 +0000 Subject: [PATCH 083/153] HDFS-5078 Support file append in NFSv3 gateway to enable data streaming to HDFS. Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518292 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/nfs/nfs3/Nfs3Constant.java | 4 ++ .../hadoop/hdfs/nfs/nfs3/OpenFileCtx.java | 13 ++++-- .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 14 ++++-- .../hadoop/hdfs/nfs/nfs3/WriteManager.java | 45 ++++++++++++++++--- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ 5 files changed, 65 insertions(+), 14 deletions(-) diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java index 1701cc12dd8..8e9a8f10764 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java @@ -201,4 +201,8 @@ public class Nfs3Constant { public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "hdfs.nfs.exports.cache.expirytime.millis"; public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min + public static final String FILE_DUMP_DIR_KEY = "dfs.nfs3.dump.dir"; + public static final String FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs"; + public static final String ENABLE_FILE_DUMP_KEY = "dfs.nfs3.enableDump"; + public static final boolean ENABLE_FILE_DUMP_DEFAULT = true; } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java index e13bebcc6f9..12de05e058e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/OpenFileCtx.java @@ -126,6 +126,9 @@ class OpenFileCtx { nonSequentialWriteInMemory = 0; this.dumpFilePath = dumpFilePath; enabledDump = dumpFilePath == null ? 
false: true; + nextOffset = latestAttr.getSize(); + assert(nextOffset == this.fos.getPos()); + ctxLock = new ReentrantLock(true); } @@ -685,12 +688,14 @@ class OpenFileCtx { try { fos.write(data, 0, count); - - if (fos.getPos() != (offset + count)) { + + long flushedOffset = getFlushedOffset(); + if (flushedOffset != (offset + count)) { throw new IOException("output stream is out of sync, pos=" - + fos.getPos() + " and nextOffset should be" + (offset + count)); + + flushedOffset + " and nextOffset should be" + + (offset + count)); } - nextOffset = fos.getPos(); + nextOffset = flushedOffset; // Reduce memory occupation size if request was allowed dumped if (writeCtx.getDataState() == DataState.ALLOW_DUMP) { diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index d8694198b95..1f39ace973f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem.Statistics; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Options; @@ -123,7 +124,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { private final Configuration config = new Configuration(); private final WriteManager writeManager; - private final IdUserGroup iug;// = new IdUserGroup(); + private final IdUserGroup iug; private final DFSClientCache clientCache; private final NfsExports exports; @@ -161,10 +162,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { DFSConfigKeys.DFS_REPLICATION_DEFAULT); blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT); - bufferSize = config.getInt("io.file.buffer.size", 4096); + bufferSize = config.getInt( + CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, + CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); - writeDumpDir = config.get("dfs.nfs3.dump.dir", "/tmp/.hdfs-nfs"); - boolean enableDump = config.getBoolean("dfs.nfs3.enableDump", true); + writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY, + Nfs3Constant.FILE_DUMP_DIR_DEFAULT); + boolean enableDump = config.getBoolean(Nfs3Constant.ENABLE_FILE_DUMP_KEY, + Nfs3Constant.ENABLE_FILE_DUMP_DEFAULT); if (!enableDump) { writeDumpDir = null; } else { @@ -1112,6 +1117,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } + @Override public SYMLINK3Response symlink(XDR xdr, RpcAuthSys authSys, InetAddress client) { return new SYMLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java index 0e96c506d2a..70e9bc396c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java @@ -25,7 +25,9 @@ import java.util.concurrent.ConcurrentMap; import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.hdfs.DFSClient; +import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; import org.apache.hadoop.nfs.NfsFileType; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.IdUserGroup; @@ -48,6 +50,7 @@ import com.google.common.collect.Maps; public class WriteManager { public static final Log LOG = LogFactory.getLog(WriteManager.class); + private final Configuration config; private final IdUserGroup iug; private final ConcurrentMap openFileMap = Maps .newConcurrentMap(); @@ -76,6 +79,7 @@ public class WriteManager { WriteManager(IdUserGroup iug, final Configuration config) { this.iug = iug; + this.config = config; streamTimeout = config.getLong("dfs.nfs3.stream.timeout", DEFAULT_STREAM_TIMEOUT); @@ -129,12 +133,41 @@ public class WriteManager { OpenFileCtx openFileCtx = openFileMap.get(fileHandle); if (openFileCtx == null) { LOG.info("No opened stream for fileId:" + fileHandle.getFileId()); - WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), preOpAttr); - WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, - fileWcc, count, request.getStableHow(), - Nfs3Constant.WRITE_COMMIT_VERF); - Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid)); - return; + + String fileIdPath = Nfs3Utils.getFileIdPath(fileHandle.getFileId()); + HdfsDataOutputStream fos = null; + Nfs3FileAttributes latestAttr = null; + try { + int bufferSize = config.getInt( + CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY, + CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT); + + fos = dfsClient.append(fileIdPath, bufferSize, null, null); + + latestAttr = Nfs3Utils.getFileAttr(dfsClient, fileIdPath, iug); + } catch (IOException e) { + LOG.error("Can't apapend to file:" + fileIdPath + ", error:" + e); + if (fos != null) { + fos.close(); + } + WccData fileWcc = new WccData(Nfs3Utils.getWccAttr(preOpAttr), + preOpAttr); + WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3ERR_IO, + fileWcc, count, request.getStableHow(), + Nfs3Constant.WRITE_COMMIT_VERF); + Nfs3Utils.writeChannel(channel, response.send(new XDR(), xid)); + return; + } + + // Add open stream + String writeDumpDir = config.get(Nfs3Constant.FILE_DUMP_DIR_KEY, + Nfs3Constant.FILE_DUMP_DIR_DEFAULT); + openFileCtx = new OpenFileCtx(fos, latestAttr, writeDumpDir + "/" + + fileHandle.getFileId()); + addOpenFileStream(fileHandle, openFileCtx); + if (LOG.isDebugEnabled()) { + LOG.debug("opened stream for file:" + fileHandle.getFileId()); + } } // Add write into the async job queue diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 48ebda3437d..90ba9465e8f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -307,6 +307,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-4947 Add NFS server export table to control export by hostname or IP range (Jing Zhao via brandonli) + HDFS-5078 Support file append in NFSv3 gateway to enable data streaming + to HDFS (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 4d0df036431a079ab064b9100fd589c634146144 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Wed, 28 Aug 2013 17:42:47 +0000 Subject: [PATCH 084/153] HADOOP-9910. proxy server start and stop documentation wrong. Contributed by Andre Kelpe. 
(harsh) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518296 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../hadoop-common/src/site/apt/ClusterSetup.apt.vm | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 7b749b7a1bd..1ebf3f74c40 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -362,6 +362,9 @@ Release 2.1.1-beta - UNRELEASED IMPROVEMENTS + HADOOP-9910. proxy server start and stop documentation wrong + (André Kelpevia harsh) + HADOOP-9446. Support Kerberos SPNEGO for IBM JDK. (Yu Gao via llu) HADOOP-9787. ShutdownHelper util to shutdown threads and threadpools. diff --git a/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm b/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm index 7ac6755b5a4..121754413d0 100644 --- a/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm +++ b/hadoop-common-project/hadoop-common/src/site/apt/ClusterSetup.apt.vm @@ -518,7 +518,7 @@ $ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR start nodemana are used with load balancing it should be run on each of them: ---- -$ $HADOOP_YARN_HOME/bin/yarn start proxyserver --config $HADOOP_CONF_DIR +$ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh start proxyserver --config $HADOOP_CONF_DIR ---- Start the MapReduce JobHistory Server with the following command, run on the @@ -560,7 +560,7 @@ $ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh --config $HADOOP_CONF_DIR stop nodemanag balancing it should be run on each of them: ---- -$ $HADOOP_YARN_HOME/bin/yarn stop proxyserver --config $HADOOP_CONF_DIR +$ $HADOOP_YARN_HOME/sbin/yarn-daemon.sh stop proxyserver --config $HADOOP_CONF_DIR ---- From 87f8c0a395b7ee0d9ca2c2418297f3f5b4988cc6 Mon Sep 17 00:00:00 2001 From: Harsh J Date: Wed, 28 Aug 2013 17:54:19 +0000 Subject: [PATCH 085/153] Addendum to HADOOP-9910 for trunk. Removed bad characters from CHANGES.txt note that was causing odd issues. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518302 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 1ebf3f74c40..35917e22ee4 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -363,7 +363,7 @@ Release 2.1.1-beta - UNRELEASED IMPROVEMENTS HADOOP-9910. proxy server start and stop documentation wrong - (André Kelpevia harsh) + (Andre Kelpe via harsh) HADOOP-9446. Support Kerberos SPNEGO for IBM JDK. (Yu Gao via llu) From f3c0074030864a0f1da2e4c2376798585cf13db0 Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Wed, 28 Aug 2013 17:54:49 +0000 Subject: [PATCH 086/153] HADOOP-9906. 
Move HAZKUtil to o.a.h.util.ZKUtil and make inner-classes public (Karthik Kambatla via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518303 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../org/apache/hadoop/ha/ActiveStandbyElector.java | 3 +-- .../org/apache/hadoop/ha/ZKFailoverController.java | 11 ++++++----- .../apache/hadoop/ha/TestActiveStandbyElector.java | 2 +- .../hadoop/ha/TestActiveStandbyElectorRealZK.java | 2 +- 5 files changed, 12 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 35917e22ee4..8e8aa0b70c8 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -390,6 +390,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9886. Turn warning message in RetryInvocationHandler to debug (arpit) + HADOOP-9906. Move HAZKUtil to o.a.h.util.ZKUtil and make inner-classes + public (Karthik Kambatla via Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java index 68ddc64fa31..4682bc70261 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java @@ -31,7 +31,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo; +import org.apache.hadoop.util.ZKUtil.ZKAuthInfo; import org.apache.hadoop.util.StringUtils; import org.apache.zookeeper.data.ACL; import org.apache.zookeeper.KeeperException; @@ -47,7 +47,6 @@ import org.apache.zookeeper.KeeperException.Code; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; /** * diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java index 712c1d0f182..3c72c4720f6 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java @@ -36,7 +36,8 @@ import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException; import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback; import org.apache.hadoop.ha.HAServiceProtocol.StateChangeRequestInfo; import org.apache.hadoop.ha.HAServiceProtocol.RequestSource; -import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo; +import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.ZKUtil.ZKAuthInfo; import org.apache.hadoop.ha.HealthMonitor.State; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.AccessControlException; @@ -313,18 +314,18 @@ public abstract class ZKFailoverController { ZK_SESSION_TIMEOUT_DEFAULT); // Parse ACLs from configuration. 
String zkAclConf = conf.get(ZK_ACL_KEY, ZK_ACL_DEFAULT); - zkAclConf = HAZKUtil.resolveConfIndirection(zkAclConf); - List zkAcls = HAZKUtil.parseACLs(zkAclConf); + zkAclConf = ZKUtil.resolveConfIndirection(zkAclConf); + List zkAcls = ZKUtil.parseACLs(zkAclConf); if (zkAcls.isEmpty()) { zkAcls = Ids.CREATOR_ALL_ACL; } // Parse authentication from configuration. String zkAuthConf = conf.get(ZK_AUTH_KEY); - zkAuthConf = HAZKUtil.resolveConfIndirection(zkAuthConf); + zkAuthConf = ZKUtil.resolveConfIndirection(zkAuthConf); List zkAuths; if (zkAuthConf != null) { - zkAuths = HAZKUtil.parseAuth(zkAuthConf); + zkAuths = ZKUtil.parseAuth(zkAuthConf); } else { zkAuths = Collections.emptyList(); } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java index 309c7ad6ed7..9e3cc4162bc 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElector.java @@ -41,7 +41,7 @@ import org.mockito.Mockito; import org.apache.hadoop.HadoopIllegalArgumentException; import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback; import org.apache.hadoop.ha.ActiveStandbyElector.ActiveNotFoundException; -import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo; +import org.apache.hadoop.util.ZKUtil.ZKAuthInfo; import org.apache.hadoop.test.GenericTestUtils; public class TestActiveStandbyElector { diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java index 7aee139a8ee..c4326842992 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestActiveStandbyElectorRealZK.java @@ -28,7 +28,7 @@ import java.util.UUID; import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback; import org.apache.hadoop.ha.ActiveStandbyElector.State; -import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo; +import org.apache.hadoop.util.ZKUtil.ZKAuthInfo; import org.apache.hadoop.util.Shell; import org.apache.log4j.Level; import org.apache.zookeeper.ZooDefs.Ids; From 82fc0f1855cd187adc99c6643e0ad35c82678a2c Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Wed, 28 Aug 2013 18:03:37 +0000 Subject: [PATCH 087/153] Adding and removing files missed for HADOOP-9906 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518306 13f79535-47bb-0310-9956-ffa450edef68 --- .../{ha/HAZKUtil.java => util/ZKUtil.java} | 31 ++++++++++------- .../TestZKUtil.java} | 33 ++++++++++--------- 2 files changed, 36 insertions(+), 28 deletions(-) rename hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/{ha/HAZKUtil.java => util/ZKUtil.java} (88%) rename hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/{ha/TestHAZKUtil.java => util/TestZKUtil.java} (82%) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAZKUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java similarity index 88% rename from 
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAZKUtil.java rename to hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java index 093b878bd14..bd08efb5cda 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAZKUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ZKUtil.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.ha; +package org.apache.hadoop.util; import java.io.File; import java.io.IOException; @@ -36,7 +36,7 @@ import com.google.common.io.Files; * Utilities for working with ZooKeeper. */ @InterfaceAudience.Private -public class HAZKUtil { +public class ZKUtil { /** * Parse ACL permission string, partially borrowed from @@ -76,9 +76,10 @@ public class HAZKUtil { * sasl:hdfs/host1@MY.DOMAIN:cdrwa,sasl:hdfs/host2@MY.DOMAIN:cdrwa * * @return ACL list - * @throws HadoopIllegalArgumentException if an ACL is invalid + * @throws {@link BadAclFormatException} if an ACL is invalid */ - public static List parseACLs(String aclString) { + public static List parseACLs(String aclString) throws + BadAclFormatException { List acl = Lists.newArrayList(); if (aclString == null) { return acl; @@ -113,8 +114,10 @@ public class HAZKUtil { * * @param authString the comma-separated auth mechanisms * @return a list of parsed authentications + * @throws {@link BadAuthFormatException} if the auth format is invalid */ - public static List parseAuth(String authString) { + public static List parseAuth(String authString) throws + BadAuthFormatException{ List ret = Lists.newArrayList(); if (authString == null) { return ret; @@ -161,7 +164,8 @@ public class HAZKUtil { /** * An authentication token passed to ZooKeeper.addAuthInfo */ - static class ZKAuthInfo { + @InterfaceAudience.Private + public static class ZKAuthInfo { private final String scheme; private final byte[] auth; @@ -171,29 +175,32 @@ public class HAZKUtil { this.auth = auth; } - String getScheme() { + public String getScheme() { return scheme; } - byte[] getAuth() { + public byte[] getAuth() { return auth; } } - static class BadAclFormatException extends HadoopIllegalArgumentException { + @InterfaceAudience.Private + public static class BadAclFormatException extends + HadoopIllegalArgumentException { private static final long serialVersionUID = 1L; public BadAclFormatException(String message) { super(message); } } - - static class BadAuthFormatException extends HadoopIllegalArgumentException { + + @InterfaceAudience.Private + public static class BadAuthFormatException extends + HadoopIllegalArgumentException { private static final long serialVersionUID = 1L; public BadAuthFormatException(String message) { super(message); } } - } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAZKUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java similarity index 82% rename from hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAZKUtil.java rename to hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java index 90371f3730f..1d14326d2ab 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ha/TestHAZKUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestZKUtil.java @@ -15,7 +15,7 @@ * See the License for the specific language 
governing permissions and * limitations under the License. */ -package org.apache.hadoop.ha; +package org.apache.hadoop.util; import static org.junit.Assert.*; @@ -24,8 +24,9 @@ import java.io.FileNotFoundException; import java.io.IOException; import java.util.List; -import org.apache.hadoop.ha.HAZKUtil.BadAclFormatException; -import org.apache.hadoop.ha.HAZKUtil.ZKAuthInfo; +import org.apache.hadoop.util.ZKUtil; +import org.apache.hadoop.util.ZKUtil.BadAclFormatException; +import org.apache.hadoop.util.ZKUtil.ZKAuthInfo; import org.apache.zookeeper.ZooDefs.Perms; import org.apache.zookeeper.data.ACL; import org.junit.Test; @@ -33,9 +34,9 @@ import org.junit.Test; import com.google.common.base.Charsets; import com.google.common.io.Files; -public class TestHAZKUtil { +public class TestZKUtil { private static final String TEST_ROOT_DIR = System.getProperty( - "test.build.data", "/tmp") + "/TestHAZKUtil"; + "test.build.data", "/tmp") + "/TestZKUtil"; private static final File TEST_FILE = new File(TEST_ROOT_DIR, "test-file"); @@ -45,13 +46,13 @@ public class TestHAZKUtil { @Test public void testEmptyACL() { - List result = HAZKUtil.parseACLs(""); + List result = ZKUtil.parseACLs(""); assertTrue(result.isEmpty()); } @Test public void testNullACL() { - List result = HAZKUtil.parseACLs(null); + List result = ZKUtil.parseACLs(null); assertTrue(result.isEmpty()); } @@ -67,7 +68,7 @@ public class TestHAZKUtil { private static void badAcl(String acls, String expectedErr) { try { - HAZKUtil.parseACLs(acls); + ZKUtil.parseACLs(acls); fail("Should have failed to parse '" + acls + "'"); } catch (BadAclFormatException e) { assertEquals(expectedErr, e.getMessage()); @@ -76,7 +77,7 @@ public class TestHAZKUtil { @Test public void testGoodACLs() { - List result = HAZKUtil.parseACLs( + List result = ZKUtil.parseACLs( "sasl:hdfs/host1@MY.DOMAIN:cdrwa, sasl:hdfs/host2@MY.DOMAIN:ca"); ACL acl0 = result.get(0); assertEquals(Perms.CREATE | Perms.DELETE | Perms.READ | @@ -92,19 +93,19 @@ public class TestHAZKUtil { @Test public void testEmptyAuth() { - List result = HAZKUtil.parseAuth(""); + List result = ZKUtil.parseAuth(""); assertTrue(result.isEmpty()); } @Test public void testNullAuth() { - List result = HAZKUtil.parseAuth(null); + List result = ZKUtil.parseAuth(null); assertTrue(result.isEmpty()); } @Test public void testGoodAuths() { - List result = HAZKUtil.parseAuth( + List result = ZKUtil.parseAuth( "scheme:data,\n scheme2:user:pass"); assertEquals(2, result.size()); ZKAuthInfo auth0 = result.get(0); @@ -118,16 +119,16 @@ public class TestHAZKUtil { @Test public void testConfIndirection() throws IOException { - assertNull(HAZKUtil.resolveConfIndirection(null)); - assertEquals("x", HAZKUtil.resolveConfIndirection("x")); + assertNull(ZKUtil.resolveConfIndirection(null)); + assertEquals("x", ZKUtil.resolveConfIndirection("x")); TEST_FILE.getParentFile().mkdirs(); Files.write("hello world", TEST_FILE, Charsets.UTF_8); - assertEquals("hello world", HAZKUtil.resolveConfIndirection( + assertEquals("hello world", ZKUtil.resolveConfIndirection( "@" + TEST_FILE.getAbsolutePath())); try { - HAZKUtil.resolveConfIndirection("@" + BOGUS_FILE); + ZKUtil.resolveConfIndirection("@" + BOGUS_FILE); fail("Did not throw for non-existent file reference"); } catch (FileNotFoundException fnfe) { assertTrue(fnfe.getMessage().startsWith(BOGUS_FILE)); From 56c1b9de0c962661ff4f8650177ca33a24368ea5 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Wed, 28 Aug 2013 21:09:08 +0000 Subject: [PATCH 088/153] YARN-1101. 
Active nodes can be decremented below 0 (Robert Parker via tgraves)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518384 13f79535-47bb-0310-9956-ffa450edef68
--- hadoop-yarn-project/CHANGES.txt | 6 +++ .../resourcemanager/rmnode/RMNodeImpl.java | 21 +++++++--- .../TestRMNodeTransitions.java | 42 +++++++++++++++++++ 3 files changed, 64 insertions(+), 5 deletions(-)
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 2945b77817f..3f03584542b 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -117,6 +117,9 @@ Release 2.1.1-beta - UNRELEASED YARN-602. Fixed NodeManager to not let users override some mandatory environmental variables. (Kenji Kikushima via vinodkv) + YARN-1101. Active nodes can be decremented below 0 (Robert Parker + via tgraves) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES @@ -1235,6 +1238,9 @@ Release 0.23.10 - UNRELEASED YARN-337. RM handles killed application tracking URL poorly (jlowe) + YARN-1101. Active nodes can be decremented below 0 (Robert Parker + via tgraves) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 1def2443438..694d2826bfe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -267,7 +267,21 @@ public class TestRMNodeTransitions { @Test public void testUnhealthyExpire() { RMNodeImpl node = getUnhealthyNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE)); + Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost + 1, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy - 1, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.LOST, node.getState()); } @@ -291,8 +305,22 @@ public class TestRMNodeTransitions { @Test public void testUnhealthyDecommission() { RMNodeImpl node = getUnhealthyNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.DECOMMISSION)); + Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy - 1, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned + 1, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.DECOMMISSIONED, node.getState()); } @@ -307,8 +335,22 @@ public class TestRMNodeTransitions { @Test public void testUnhealthyRebooting() { RMNodeImpl node = getUnhealthyNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.REBOOTING)); + Assert.assertEquals("Active Nodes", initialActive, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy - 1, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned, 
cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted + 1, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.REBOOTED, node.getState()); } From 4ba7a5e5642af2db82d8ab0af8ae65758297c1ad Mon Sep 17 00:00:00 2001 From: Arpit Agarwal Date: Thu, 29 Aug 2013 00:06:04 +0000 Subject: [PATCH 089/153] HADOOP-9894. Race condition in Shell leads to logged error stream handling exceptions (Arpit Agarwal) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518420 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/util/Shell.java | 9 +++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 8e8aa0b70c8..c1f8d39511d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -429,6 +429,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9887. globStatus does not correctly handle paths starting with a drive spec on Windows. (Chuan Liu via cnauroth) + HADOOP-9894. Race condition in Shell leads to logged error stream handling + exceptions (Arpit Agarwal) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index 0a8ce2e9983..9f6fcc21257 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -515,8 +515,13 @@ abstract public class Shell { } catch (IOException ioe) { LOG.warn("Error while closing the input stream", ioe); } - if (!completed.get()) { - errThread.interrupt(); + try { + if (!completed.get()) { + errThread.interrupt(); + errThread.join(); + } + } catch (InterruptedException ie) { + LOG.warn("Interrupted while joining errThread"); } try { errReader.close(); From c13893d5d9ca89e2603a62f5847a3a3602060dbe Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Thu, 29 Aug 2013 02:22:54 +0000 Subject: [PATCH 090/153] YARN-1034. Remove "experimental" in the Fair Scheduler documentation. (Karthik Kambatla via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518444 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm | 3 +-- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 3f03584542b..939aafe0f86 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -60,6 +60,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1081. Made a trivial change to YARN node CLI header to avoid potential confusion. (Akira AJISAKA via vinodkv) + YARN-1034. Remove "experimental" in the Fair Scheduler documentation. 
+ (Karthik Kambatla via Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm index 5d8083423f7..84d6ca29119 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm @@ -25,8 +25,7 @@ Hadoop MapReduce Next Generation - Fair Scheduler * {Purpose} This document describes the <<>>, a pluggable scheduler for Hadoop - which provides a way to share large clusters. <> The Fair Scheduler - implementation is currently under development and should be considered experimental. + that allows YARN applications to share resources in large clusters fairly. * {Introduction} From febedd64e998c70594d84e2dc273cc0a469544e2 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Thu, 29 Aug 2013 17:55:30 +0000 Subject: [PATCH 091/153] YARN-1080. Improved help message for "yarn logs" command. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518731 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../hadoop/yarn/logaggregation/LogDumper.java | 30 +++++++++---- .../yarn/logaggregation/TestLogDumper.java | 42 +++++++++++++++++++ 3 files changed, 66 insertions(+), 9 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 939aafe0f86..c9e3df0f54f 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -63,6 +63,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1034. Remove "experimental" in the Fair Scheduler documentation. (Karthik Kambatla via Sandy Ryza) + YARN-1080. Improved help message for "yarn logs" command. 
(Xuan Gong via + vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogDumper.java index f9e6c140894..1e7ed44e2d3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogDumper.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/logaggregation/LogDumper.java @@ -72,10 +72,18 @@ public class LogDumper extends Configured implements Tool { + "nodename:port (must be specified if container id is specified)"); opts.addOption(APP_OWNER_OPTION, true, "AppOwner (assumed to be current user if not specified)"); + opts.getOption(APPLICATION_ID_OPTION).setArgName("Application ID"); + opts.getOption(CONTAINER_ID_OPTION).setArgName("Container ID"); + opts.getOption(NODE_ADDRESS_OPTION).setArgName("Node Address"); + opts.getOption(APP_OWNER_OPTION).setArgName("Application Owner"); + + Options printOpts = new Options(); + printOpts.addOption(opts.getOption(CONTAINER_ID_OPTION)); + printOpts.addOption(opts.getOption(NODE_ADDRESS_OPTION)); + printOpts.addOption(opts.getOption(APP_OWNER_OPTION)); if (args.length < 1) { - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("general options are: ", opts); + printHelpMessage(printOpts); return -1; } @@ -92,16 +100,13 @@ public class LogDumper extends Configured implements Tool { appOwner = commandLine.getOptionValue(APP_OWNER_OPTION); } catch (ParseException e) { System.out.println("options parsing failed: " + e.getMessage()); - - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("general options are: ", opts); + printHelpMessage(printOpts); return -1; } if (appIdStr == null) { System.out.println("ApplicationId cannot be null!"); - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("general options are: ", opts); + printHelpMessage(printOpts); return -1; } @@ -119,8 +124,7 @@ public class LogDumper extends Configured implements Tool { } else if ((containerIdStr == null && nodeAddress != null) || (containerIdStr != null && nodeAddress == null)) { System.out.println("ContainerId or NodeAddress cannot be null!"); - HelpFormatter formatter = new HelpFormatter(); - formatter.printHelp("general options are: ", opts); + printHelpMessage(printOpts); resultCode = -1; } else { Path remoteRootLogDir = @@ -255,4 +259,12 @@ public class LogDumper extends Configured implements Tool { int exitCode = logDumper.run(args); System.exit(exitCode); } + + private void printHelpMessage(Options options) { + System.out.println("Retrieve logs for completed YARN applications."); + HelpFormatter formatter = new HelpFormatter(); + formatter.printHelp("yarn logs -applicationId [OPTIONS]", new Options()); + formatter.setSyntaxPrefix(""); + formatter.printHelp("general options are:", options); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestLogDumper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestLogDumper.java index 527a551f2d6..683b80c2c5f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestLogDumper.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/logaggregation/TestLogDumper.java @@ -19,14 +19,30 @@ package org.apache.hadoop.yarn.logaggregation; import static org.junit.Assert.assertTrue; +import java.io.ByteArrayOutputStream; +import java.io.PrintStream; +import java.io.PrintWriter; + +import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.junit.Before; import org.junit.Test; public class TestLogDumper { + ByteArrayOutputStream sysOutStream; + private PrintStream sysOut; + + @Before + public void setUp() { + sysOutStream = new ByteArrayOutputStream(); + sysOut = new PrintStream(sysOutStream); + System.setOut(sysOut); + } + @Test public void testFailResultCodes() throws Exception { Configuration conf = new YarnConfiguration(); @@ -44,4 +60,30 @@ public class TestLogDumper { "nonexistentnode:1234", "nobody"); assertTrue("Should return an error code", exitCode != 0); } + + @Test + public void testHelpMessage() throws Exception { + Configuration conf = new YarnConfiguration(); + LogDumper dumper = new LogDumper(); + dumper.setConf(conf); + + int exitCode = dumper.run(new String[]{}); + assertTrue(exitCode == -1); + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintWriter pw = new PrintWriter(baos); + pw.println("Retrieve logs for completed YARN applications."); + pw.println("usage: yarn logs -applicationId [OPTIONS]"); + pw.println(); + pw.println("general options are:"); + pw.println(" -appOwner AppOwner (assumed to be current user if"); + pw.println(" not specified)"); + pw.println(" -containerId ContainerId (must be specified if node"); + pw.println(" address is specified)"); + pw.println(" -nodeAddress NodeAddress in the format nodename:port"); + pw.println(" (must be specified if container id is"); + pw.println(" specified)"); + pw.close(); + String appReportStr = baos.toString("UTF-8"); + Assert.assertEquals(appReportStr, sysOutStream.toString()); + } } From a29714f07708fdf9ff380d93422ab5a64dc34885 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Thu, 29 Aug 2013 19:24:41 +0000 Subject: [PATCH 092/153] MAPREDUCE-5483. revert MAPREDUCE-5357. (rkanter via tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518772 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java | 1 - 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 5769d926e67..f651aa60ed3 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -243,6 +243,8 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5476. Changed MR AM recovery code to cleanup staging-directory only after unregistering from the RM. (Jian He via vinodkv) + MAPREDUCE-5483. revert MAPREDUCE-5357. 
(rkanter via tucu) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java index 7e0453d3bfb..a4ea1d80a08 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmissionFiles.java @@ -124,7 +124,6 @@ public class JobSubmissionFiles { } else { fs.mkdirs(stagingArea, new FsPermission(JOB_DIR_PERMISSION)); - fs.setOwner(stagingArea, currentUser, null); } return stagingArea; } From 5d4b684c02ef8b702c68345d7306be2d19195161 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Thu, 29 Aug 2013 21:07:57 +0000 Subject: [PATCH 093/153] MAPREDUCE-5441. Changed MR AM to return RUNNING state if exiting when RM commands to reboot, so that client can continue to track the overall job. Contributed by xJian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518821 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 ++ .../mapreduce/v2/app/job/impl/JobImpl.java | 10 +++- .../hadoop/mapreduce/v2/app/TestMRApp.java | 47 +++++++++++++++++ .../v2/app/job/impl/TestJobImpl.java | 50 ++++++++++++------- 4 files changed, 90 insertions(+), 21 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index f651aa60ed3..8ba4b35d3af 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -245,6 +245,10 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5483. revert MAPREDUCE-5357. (rkanter via tucu) + MAPREDUCE-5441. Changed MR AM to return RUNNING state if exiting when RM + commands to reboot, so that client can continue to track the overall job. 
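The substantive part of MAPREDUCE-5441 is the mapping in JobImpl.getExternalState(): the internal REBOOT state is reported to clients as RUNNING unless this attempt is the last AM retry, in which case it is reported as ERROR, so JobClient neither exits prematurely nor waits on a job that will never be relaunched. A stripped-down sketch of that mapping follows, with hypothetical enums standing in for JobStateInternal and JobState.

    // Hypothetical stand-ins for JobStateInternal and JobState.
    enum InternalState { RUNNING, REBOOT, SUCCEEDED, FAILED }
    enum ExternalState { RUNNING, SUCCEEDED, FAILED, ERROR }

    class JobStateMapper {
      static ExternalState toExternal(InternalState state, boolean isLastAMRetry) {
        switch (state) {
          case REBOOT:
            // On the last retry nothing will relaunch the AM, so surface ERROR;
            // otherwise report RUNNING so a polling client keeps tracking the job.
            return isLastAMRetry ? ExternalState.ERROR : ExternalState.RUNNING;
          default:
            // The remaining internal states have identically named external states.
            return ExternalState.valueOf(state.name());
        }
      }
    }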
+ (Jian He via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index be2e49bf916..a146a0cee4b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -993,7 +993,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, } } - private static JobState getExternalState(JobStateInternal smState) { + private JobState getExternalState(JobStateInternal smState) { switch (smState) { case KILL_WAIT: case KILL_ABORT: @@ -1005,7 +1005,13 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, case FAIL_ABORT: return JobState.FAILED; case REBOOT: - return JobState.ERROR; + if (appContext.isLastAMRetry()) { + return JobState.ERROR; + } else { + // In case of not last retry, return the external state as RUNNING since + // otherwise JobClient will exit when it polls the AM for job state + return JobState.RUNNING; + } default: return JobState.valueOf(smState.name()); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java index 48e7e7ef182..53b5aaa5c89 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRApp.java @@ -29,6 +29,7 @@ import java.util.Iterator; import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.MRConfig; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; @@ -41,6 +42,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.app.job.Task; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent; +import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.JobUpdatedNodesEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; @@ -51,12 +54,15 @@ import org.apache.hadoop.mapreduce.v2.app.job.impl.TaskAttemptImpl; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent; import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; import 
org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.util.Clock; import org.junit.Test; /** @@ -368,6 +374,47 @@ public class TestMRApp { app.waitForState(job, JobState.ERROR); } + @Test + public void testJobRebootNotLastRetry() throws Exception { + MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true); + Job job = app.submit(new Configuration()); + app.waitForState(job, JobState.RUNNING); + Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size()); + Iterator it = job.getTasks().values().iterator(); + Task task = it.next(); + app.waitForState(task, TaskState.RUNNING); + + //send an reboot event + app.getContext().getEventHandler().handle(new JobEvent(job.getID(), + JobEventType.JOB_AM_REBOOT)); + + // return exteranl state as RUNNING since otherwise the JobClient will + // prematurely exit. + app.waitForState(job, JobState.RUNNING); + } + + @Test + public void testJobRebootOnLastRetry() throws Exception { + // make startCount as 2 since this is last retry which equals to + // DEFAULT_MAX_AM_RETRY + MRApp app = new MRApp(1, 0, false, this.getClass().getName(), true, 2); + + Configuration conf = new Configuration(); + Job job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size()); + Iterator it = job.getTasks().values().iterator(); + Task task = it.next(); + app.waitForState(task, TaskState.RUNNING); + + //send an reboot event + app.getContext().getEventHandler().handle(new JobEvent(job.getID(), + JobEventType.JOB_AM_REBOOT)); + + // return exteranl state as ERROR if this is the last retry + app.waitForState(job, JobState.ERROR); + } + private final class MRAppWithSpiedJob extends MRApp { private JobImpl spiedJob; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java index ad5745e1364..72fc4f4471e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestJobImpl.java @@ -142,7 +142,7 @@ public class TestJobImpl { "testName", "testNodeName", "\"key2\"=\"value2\" \"key1\"=\"value1\" ", "tag1,tag2"); dispatcher.register(EventType.class, jseHandler); - JobImpl job = createStubbedJob(conf, dispatcher, 0); + JobImpl job = createStubbedJob(conf, dispatcher, 0, null); job.handle(new JobEvent(job.getID(), JobEventType.JOB_INIT)); assertJobState(job, JobStateInternal.INITED); job.handle(new JobStartEvent(job.getID())); @@ -170,7 +170,7 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = createRunningStubbedJob(conf, dispatcher, 2); + JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null); completeJobTasks(job); assertJobState(job, JobStateInternal.COMMITTING); @@ -195,7 +195,7 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = 
createRunningStubbedJob(conf, dispatcher, 2); + JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null); completeJobTasks(job); assertJobState(job, JobStateInternal.COMMITTING); @@ -239,7 +239,9 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = createStubbedJob(conf, dispatcher, 2); + AppContext mockContext = mock(AppContext.class); + when(mockContext.isLastAMRetry()).thenReturn(false); + JobImpl job = createStubbedJob(conf, dispatcher, 2, mockContext); JobId jobId = job.getID(); job.handle(new JobEvent(jobId, JobEventType.JOB_INIT)); assertJobState(job, JobStateInternal.INITED); @@ -248,6 +250,10 @@ public class TestJobImpl { job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT)); assertJobState(job, JobStateInternal.REBOOT); + // return the external state as RUNNING since otherwise JobClient will + // exit when it polls the AM for job state + Assert.assertEquals(JobState.RUNNING, job.getState()); + dispatcher.stop(); commitHandler.stop(); } @@ -256,6 +262,7 @@ public class TestJobImpl { public void testRebootedDuringCommit() throws Exception { Configuration conf = new Configuration(); conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir); + conf.setInt(MRJobConfig.MR_AM_MAX_ATTEMPTS, 2); AsyncDispatcher dispatcher = new AsyncDispatcher(); dispatcher.init(conf); dispatcher.start(); @@ -266,13 +273,18 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = createRunningStubbedJob(conf, dispatcher, 2); + AppContext mockContext = mock(AppContext.class); + when(mockContext.isLastAMRetry()).thenReturn(true); + JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, mockContext); completeJobTasks(job); assertJobState(job, JobStateInternal.COMMITTING); syncBarrier.await(); job.handle(new JobEvent(job.getID(), JobEventType.JOB_AM_REBOOT)); assertJobState(job, JobStateInternal.REBOOT); + // return the external state as FAILED since this is last retry. + Assert.assertEquals(JobState.ERROR, job.getState()); + dispatcher.stop(); commitHandler.stop(); } @@ -301,7 +313,7 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = createStubbedJob(conf, dispatcher, 2); + JobImpl job = createStubbedJob(conf, dispatcher, 2, null); JobId jobId = job.getID(); job.handle(new JobEvent(jobId, JobEventType.JOB_INIT)); assertJobState(job, JobStateInternal.INITED); @@ -328,7 +340,7 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = createRunningStubbedJob(conf, dispatcher, 2); + JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null); completeJobTasks(job); assertJobState(job, JobStateInternal.COMMITTING); @@ -352,7 +364,7 @@ public class TestJobImpl { createCommitterEventHandler(dispatcher, committer); commitHandler.init(conf); commitHandler.start(); - JobImpl job = createRunningStubbedJob(conf, dispatcher, 2); + JobImpl job = createRunningStubbedJob(conf, dispatcher, 2, null); //Fail one task. This should land the JobImpl in the FAIL_WAIT state job.handle(new JobTaskEvent( @@ -388,7 +400,7 @@ public class TestJobImpl { //Job has only 1 mapper task. No reducers conf.setInt(MRJobConfig.NUM_REDUCES, 0); conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, 1); - JobImpl job = createRunningStubbedJob(conf, dispatcher, 1); + JobImpl job = createRunningStubbedJob(conf, dispatcher, 1, null); //Fail / finish all the tasks. 
This should land the JobImpl directly in the //FAIL_ABORT state @@ -440,7 +452,7 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = createStubbedJob(conf, dispatcher, 2); + JobImpl job = createStubbedJob(conf, dispatcher, 2, null); JobId jobId = job.getID(); job.handle(new JobEvent(jobId, JobEventType.JOB_INIT)); assertJobState(job, JobStateInternal.INITED); @@ -477,7 +489,7 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = createStubbedJob(conf, dispatcher, 2); + JobImpl job = createStubbedJob(conf, dispatcher, 2, null); JobId jobId = job.getID(); job.handle(new JobEvent(jobId, JobEventType.JOB_INIT)); assertJobState(job, JobStateInternal.INITED); @@ -687,7 +699,7 @@ public class TestJobImpl { commitHandler.init(conf); commitHandler.start(); - JobImpl job = createStubbedJob(conf, dispatcher, 2); + JobImpl job = createStubbedJob(conf, dispatcher, 2, null); JobId jobId = job.getID(); job.handle(new JobEvent(jobId, JobEventType.JOB_INIT)); assertJobState(job, JobStateInternal.INITED); @@ -735,12 +747,12 @@ public class TestJobImpl { } private static StubbedJob createStubbedJob(Configuration conf, - Dispatcher dispatcher, int numSplits) { + Dispatcher dispatcher, int numSplits, AppContext appContext) { JobID jobID = JobID.forName("job_1234567890000_0001"); JobId jobId = TypeConverter.toYarn(jobID); StubbedJob job = new StubbedJob(jobId, ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 0), 0), - conf,dispatcher.getEventHandler(), true, "somebody", numSplits); + conf,dispatcher.getEventHandler(), true, "somebody", numSplits, appContext); dispatcher.register(JobEventType.class, job); EventHandler mockHandler = mock(EventHandler.class); dispatcher.register(TaskEventType.class, mockHandler); @@ -751,8 +763,8 @@ public class TestJobImpl { } private static StubbedJob createRunningStubbedJob(Configuration conf, - Dispatcher dispatcher, int numSplits) { - StubbedJob job = createStubbedJob(conf, dispatcher, numSplits); + Dispatcher dispatcher, int numSplits, AppContext appContext) { + StubbedJob job = createStubbedJob(conf, dispatcher, numSplits, appContext); job.handle(new JobEvent(job.getID(), JobEventType.JOB_INIT)); assertJobState(job, JobStateInternal.INITED); job.handle(new JobStartEvent(job.getID())); @@ -880,13 +892,13 @@ public class TestJobImpl { } public StubbedJob(JobId jobId, ApplicationAttemptId applicationAttemptId, - Configuration conf, EventHandler eventHandler, - boolean newApiCommitter, String user, int numSplits) { + Configuration conf, EventHandler eventHandler, boolean newApiCommitter, + String user, int numSplits, AppContext appContext) { super(jobId, applicationAttemptId, conf, eventHandler, null, new JobTokenSecretManager(), new Credentials(), new SystemClock(), Collections. emptyMap(), MRAppMetrics.create(), null, newApiCommitter, user, - System.currentTimeMillis(), null, null, null, null); + System.currentTimeMillis(), null, appContext, null, null); initTransition = getInitTransition(numSplits); localFactory = stateMachineFactory.addTransition(JobStateInternal.NEW, From eef32121d1d81076fd7e49ae65af03d1a6837dca Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Thu, 29 Aug 2013 22:35:12 +0000 Subject: [PATCH 094/153] HADOOP-9889. 
Refresh the Krb5 configuration when creating a new kdc in Hadoop-MiniKDC (Wei Yan via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518847 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../main/java/org/apache/hadoop/minikdc/MiniKdc.java | 12 ++++++++++++ .../java/org/apache/hadoop/minikdc/TestMiniKdc.java | 6 +++++- 3 files changed, 20 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c1f8d39511d..660085aa89c 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -327,6 +327,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9487 Deprecation warnings in Configuration should go to their own log or otherwise be suppressible (Chu Tong via stevel) + HADOOP-9889. Refresh the Krb5 configuration when creating a new kdc in + Hadoop-MiniKDC (Wei Yan via Sandy Ryza) + OPTIMIZATIONS HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn) diff --git a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java index c8aa78a9f34..ed2142b46ca 100644 --- a/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java +++ b/hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java @@ -59,6 +59,7 @@ import java.io.FileReader; import java.io.InputStream; import java.io.InputStreamReader; import java.io.StringReader; +import java.lang.reflect.Method; import java.net.InetAddress; import java.net.ServerSocket; import java.text.MessageFormat; @@ -432,6 +433,17 @@ public class MiniKdc { System.setProperty("sun.security.krb5.debug", conf.getProperty(DEBUG, "false")); + + // refresh the config + Class classRef; + if (System.getProperty("java.vendor").contains("IBM")) { + classRef = Class.forName("com.ibm.security.krb5.internal.Config"); + } else { + classRef = Class.forName("sun.security.krb5.Config"); + } + Method refreshMethod = classRef.getMethod("refresh", new Class[0]); + refreshMethod.invoke(classRef, new Object[0]); + LOG.info("MiniKdc listening at port: {}", getPort()); LOG.info("MiniKdc setting JVM krb5.conf to: {}", krb5conf.getAbsolutePath()); diff --git a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java index c1fc56daecd..c052bb1425a 100644 --- a/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java +++ b/hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java @@ -30,7 +30,11 @@ import javax.security.auth.login.Configuration; import javax.security.auth.login.LoginContext; import java.io.File; import java.security.Principal; -import java.util.*; +import java.util.Set; +import java.util.Map; +import java.util.HashSet; +import java.util.HashMap; +import java.util.Arrays; public class TestMiniKdc extends KerberosSecurityTestcase { From eb484bb5629e57c97192b6794f30c1fbb290b6ee Mon Sep 17 00:00:00 2001 From: Konstantin Shvachko Date: Thu, 29 Aug 2013 23:08:02 +0000 Subject: [PATCH 095/153] HDFS-5077. NPE in FSNamesystem.commitBlockSynchronization(). Contributed by Plamen Jeliazkov. 
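The NullPointerException fixed by HDFS-5077 came from a fixed-size DatanodeDescriptor array: when a reported target was unknown to the DatanodeManager, getDatanode() returned null, the null was stored in the array, and the later addBlock() call dereferenced it. The fix collects only successful lookups into a list. The sketch below is a simplified, self-contained illustration of that null-safe resolution; the class and map are placeholders, not the HDFS types.

    import java.util.ArrayList;
    import java.util.HashMap;
    import java.util.List;
    import java.util.Map;

    class TargetResolver {
      // Stand-in for DatanodeManager's registry of known datanodes.
      private final Map<String, String> registeredNodes = new HashMap<>();

      List<String> resolve(String[] reportedTargets) {
        List<String> resolved = new ArrayList<>(reportedTargets.length);
        for (String id : reportedTargets) {
          String node = registeredNodes.get(id);  // null for unknown nodes
          if (node != null) {
            resolved.add(node);                   // skip instead of storing null
          }
        }
        return resolved;                          // safe to iterate without null checks
      }
    }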
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518851 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/namenode/FSNamesystem.java | 27 ++++++++++++------- .../TestCommitBlockSynchronization.java | 19 +++++++++++++ 3 files changed, 39 insertions(+), 10 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 90ba9465e8f..84100e18c4d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -401,6 +401,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5132. Deadlock in NameNode between SafeModeMonitor#run and DatanodeManager#handleHeartbeat. (kihwal) + HDFS-5077. NPE in FSNamesystem.commitBlockSynchronization(). + (Plamen Jeliazkov via shv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 178f3d9b087..a397ce94fd1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -174,7 +174,6 @@ import org.apache.hadoop.hdfs.server.common.Util; import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease; -import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; @@ -3772,24 +3771,32 @@ public class FSNamesystem implements Namesystem, FSClusterStats, // find the DatanodeDescriptor objects // There should be no locations in the blockManager till now because the // file is underConstruction - DatanodeDescriptor[] descriptors = null; + List targetList = + new ArrayList(newtargets.length); if (newtargets.length > 0) { - descriptors = new DatanodeDescriptor[newtargets.length]; - for(int i = 0; i < newtargets.length; i++) { - descriptors[i] = blockManager.getDatanodeManager().getDatanode( - newtargets[i]); + for (DatanodeID newtarget : newtargets) { + // try to get targetNode + DatanodeDescriptor targetNode = + blockManager.getDatanodeManager().getDatanode(newtarget); + if (targetNode != null) + targetList.add(targetNode); + else if (LOG.isDebugEnabled()) { + LOG.debug("DatanodeDescriptor (=" + newtarget + ") not found"); + } } } - if ((closeFile) && (descriptors != null)) { + if ((closeFile) && !targetList.isEmpty()) { // the file is getting closed. Insert block locations into blockManager. // Otherwise fsck will report these blocks as MISSING, especially if the // blocksReceived from Datanodes take a long time to arrive. 
- for (int i = 0; i < descriptors.length; i++) { - descriptors[i].addBlock(storedBlock); + for (DatanodeDescriptor targetNode : targetList) { + targetNode.addBlock(storedBlock); } } // add pipeline locations into the INodeUnderConstruction - pendingFile.setLastBlock(storedBlock, descriptors); + DatanodeDescriptor[] targetArray = + new DatanodeDescriptor[targetList.size()]; + pendingFile.setLastBlock(storedBlock, targetList.toArray(targetArray)); } if (closeFile) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java index 53007196200..f40b799d1a8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCommitBlockSynchronization.java @@ -169,4 +169,23 @@ public class TestCommitBlockSynchronization { namesystemSpy.commitBlockSynchronization( lastBlock, genStamp, length, true, false, newTargets, null); } + + @Test + public void testCommitBlockSynchronizationWithCloseAndNonExistantTarget() + throws IOException { + INodeFileUnderConstruction file = mock(INodeFileUnderConstruction.class); + Block block = new Block(blockId, length, genStamp); + FSNamesystem namesystemSpy = makeNameSystemSpy(block, file); + DatanodeID[] newTargets = new DatanodeID[]{ + new DatanodeID("0.0.0.0", "nonexistantHost", "1", 0, 0, 0)}; + + ExtendedBlock lastBlock = new ExtendedBlock(); + namesystemSpy.commitBlockSynchronization( + lastBlock, genStamp, length, true, + false, newTargets, null); + + // Repeat the call to make sure it returns true + namesystemSpy.commitBlockSynchronization( + lastBlock, genStamp, length, true, false, newTargets, null); + } } From 2cc851a66e86b82ed6f9fc3b86c2df3001519c51 Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Thu, 29 Aug 2013 23:45:16 +0000 Subject: [PATCH 096/153] MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518857 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../org/apache/hadoop/mapred/YarnChild.java | 18 ++++++++---------- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 8ba4b35d3af..1cc5e1b333c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -162,6 +162,8 @@ Release 2.3.0 - UNRELEASED OPTIMIZATIONS + MAPREDUCE-5484. YarnChild unnecessarily loads job conf twice (Sandy Ryza) + BUG FIXES MAPREDUCE-5316. 
job -list-attempt-ids command does not handle illegal diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java index e4d0041d211..31007097129 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/YarnChild.java @@ -75,9 +75,9 @@ class YarnChild { Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler()); LOG.debug("Child starting"); - final JobConf defaultConf = new JobConf(); - defaultConf.addResource(MRJobConfig.JOB_CONF_FILE); - UserGroupInformation.setConfiguration(defaultConf); + final JobConf job = new JobConf(); + job.addResource(MRJobConfig.JOB_CONF_FILE); + UserGroupInformation.setConfiguration(job); String host = args[0]; int port = Integer.parseInt(args[1]); @@ -111,7 +111,7 @@ class YarnChild { @Override public TaskUmbilicalProtocol run() throws Exception { return (TaskUmbilicalProtocol)RPC.getProxy(TaskUmbilicalProtocol.class, - TaskUmbilicalProtocol.versionID, address, defaultConf); + TaskUmbilicalProtocol.versionID, address, job); } }); @@ -140,7 +140,7 @@ class YarnChild { YarnChild.taskid = task.getTaskID(); // Create the job-conf and set credentials - final JobConf job = configureTask(task, credentials, jt); + configureTask(job, task, credentials, jt); // log the system properties String systemPropsToLog = MRApps.getSystemPropertiesToLog(job); @@ -260,11 +260,10 @@ class YarnChild { job.set(MRJobConfig.JOB_LOCAL_DIR,workDir.toString()); } - private static JobConf configureTask(Task task, Credentials credentials, - Token jt) throws IOException { - final JobConf job = new JobConf(MRJobConfig.JOB_CONF_FILE); + private static void configureTask(JobConf job, Task task, + Credentials credentials, Token jt) throws IOException { job.setCredentials(credentials); - + ApplicationAttemptId appAttemptId = ConverterUtils.toContainerId( System.getenv(Environment.CONTAINER_ID.name())) @@ -306,7 +305,6 @@ class YarnChild { writeLocalJobFile(localTaskFile, job); task.setJobFile(localTaskFile.toString()); task.setConf(job); - return job; } /** From 87e449fd239b68339f9008897a74ee155e98f2ba Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 30 Aug 2013 00:32:03 +0000 Subject: [PATCH 097/153] HADOOP-9909. org.apache.hadoop.fs.Stat should permit other LANG. (Shinichi Yamashita via Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518862 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 3 +++ .../src/main/java/org/apache/hadoop/fs/Stat.java | 6 ++++++ .../src/main/java/org/apache/hadoop/util/Shell.java | 7 +++++++ .../src/test/java/org/apache/hadoop/fs/TestStat.java | 6 ++++++ 4 files changed, 22 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 660085aa89c..7f88052df20 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -357,6 +357,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9877. Fix listing of snapshot directories in globStatus. (Binglin Chang via Andrew Wang) + HADOOP-9909. org.apache.hadoop.fs.Stat should permit other LANG. 
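The HADOOP-9909 change works by letting a Shell command carry its own environment and having Stat put LANG=C into it, so the output of stat(1) keeps a fixed, parseable format regardless of the locale the JVM inherited. A standalone sketch of the same idea using only ProcessBuilder follows; the GNU stat format string and the fallback file are illustrative.

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    public class LocaleSafeStat {
      public static void main(String[] args) throws IOException, InterruptedException {
        String target = args.length > 0 ? args[0] : "/etc/hosts";
        ProcessBuilder pb = new ProcessBuilder("stat", "-c", "%s,%F,%Y", target);
        pb.environment().put("LANG", "C");   // pin the child's locale
        Process p = pb.start();
        try (BufferedReader r =
            new BufferedReader(new InputStreamReader(p.getInputStream()))) {
          String line;
          while ((line = r.readLine()) != null) {
            System.out.println(line);        // e.g. "221,regular file,1377734400"
          }
        }
        p.waitFor();
      }
    }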
+ (Shinichi Yamashita via Andrew Wang) + Release 2.1.1-beta - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java index 36dd8811e77..960f5cef3c3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java @@ -20,6 +20,8 @@ package org.apache.hadoop.fs; import java.io.BufferedReader; import java.io.FileNotFoundException; import java.io.IOException; +import java.util.HashMap; +import java.util.Map; import java.util.NoSuchElementException; import java.util.StringTokenizer; @@ -62,6 +64,10 @@ public class Stat extends Shell { this.path = new Path(qualified.toUri().getPath()); this.blockSize = blockSize; this.dereference = deref; + // LANG = C setting + Map env = new HashMap(); + env.put("LANG", "C"); + setEnvironment(env); } public FileStatus getFileStatus() throws IOException { diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java index 9f6fcc21257..8013f22b97a 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java @@ -540,6 +540,13 @@ abstract public class Shell { protected abstract void parseExecResult(BufferedReader lines) throws IOException; + /** + * Get the environment variable + */ + public String getEnvironment(String env) { + return environment.get(env); + } + /** get the current sub-process executing the given command * @return process executing the command */ diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java index 4397f2d534c..506facf0c64 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -119,4 +120,9 @@ public class TestStat { // expected } } + + @Test(timeout=10000) + public void testStatEnvironment() throws Exception { + assertEquals(stat.getEnvironment("LANG"), "C"); + } } From 2088309d66541d74f2abde4e28bbf301aad7c0be Mon Sep 17 00:00:00 2001 From: Ivan Mitic Date: Fri, 30 Aug 2013 01:04:35 +0000 Subject: [PATCH 098/153] HADOOP-9774. RawLocalFileSystem.listStatus() return absolute paths when input path is relative on Windows. Contributed by Shanyu Zhao. 
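The new three-argument Path constructor guards two cases before the string reaches URI parsing: a Windows path that starts with a drive letter gets a leading slash, and on other platforms a relative path whose first segment contains a colon (such as "a:b") gets a "./" prefix so the text before the colon is not read as a URI scheme. That second rule comes straight from java.net.URI, which Path is layered on, and can be seen in isolation with the short sketch below (the class name is illustrative).

    import java.net.URI;
    import java.net.URISyntaxException;

    public class ColonPathDemo {
      public static void main(String[] args) throws URISyntaxException {
        URI bare = new URI("a:b");        // parsed as scheme "a", opaque part "b"
        URI prefixed = new URI("./a:b");  // parsed as a plain relative path
        System.out.println(bare.getScheme());      // prints: a
        System.out.println(prefixed.getScheme());  // prints: null
      }
    }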
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518865 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 ++ .../main/java/org/apache/hadoop/fs/Path.java | 12 ++++++ .../apache/hadoop/fs/RawLocalFileSystem.java | 6 ++- .../apache/hadoop/fs/TestLocalFileSystem.java | 15 ++++++++ .../java/org/apache/hadoop/fs/TestPath.java | 38 ++++++++++++++++++- 5 files changed, 71 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 7f88052df20..7e12e7f288b 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -438,6 +438,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9894. Race condition in Shell leads to logged error stream handling exceptions (Arpit Agarwal) + HADOOP-9774. RawLocalFileSystem.listStatus() return absolute paths when + input path is relative on Windows. (Shanyu Zhao via ivanmi) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java index 4b50882eae8..2d3acd0f8bb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Path.java @@ -182,6 +182,18 @@ public class Path implements Comparable { /** Construct a Path from components. */ public Path(String scheme, String authority, String path) { checkPathArg( path ); + + // add a slash in front of paths with Windows drive letters + if (hasWindowsDrive(path) && path.charAt(0) != '/') { + path = "/" + path; + } + + // add "./" in front of Linux relative paths so that a path containing + // a colon e.q. "a:b" will not be interpreted as scheme "a". 
+ if (!WINDOWS && path.charAt(0) != '/') { + path = "./" + path; + } + initialize(scheme, authority, path, null); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java index 42f77fc3508..c2e2458fe0c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java @@ -393,7 +393,7 @@ public class RawLocalFileSystem extends FileSystem { new DeprecatedRawLocalFileStatus(localf, getDefaultBlockSize(f), this)}; } - File[] names = localf.listFiles(); + String[] names = localf.list(); if (names == null) { return null; } @@ -401,7 +401,9 @@ public class RawLocalFileSystem extends FileSystem { int j = 0; for (int i = 0; i < names.length; i++) { try { - results[j] = getFileStatus(new Path(names[i].getAbsolutePath())); + // Assemble the path using the Path 3 arg constructor to make sure + // paths with colon are properly resolved on Linux + results[j] = getFileStatus(new Path(f, new Path(null, null, names[i]))); j++; } catch (FileNotFoundException e) { // ignore the files not found since the dir list may have have changed diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java index dacb2c9b82f..8f427500c86 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java @@ -280,6 +280,21 @@ public class TestLocalFileSystem { stats[0].getPath().toUri().getPath()); } + @Test + public void testListStatusReturnConsistentPathOnWindows() throws IOException { + assumeTrue(Shell.WINDOWS); + String dirNoDriveSpec = TEST_ROOT_DIR; + if (dirNoDriveSpec.charAt(1) == ':') + dirNoDriveSpec = dirNoDriveSpec.substring(2); + + File file = new File(dirNoDriveSpec, "foo"); + file.mkdirs(); + FileStatus[] stats = fileSys.listStatus(new Path(dirNoDriveSpec)); + assertEquals("Unexpected number of stats", 1, stats.length); + assertEquals("Bad path from stat", new Path(file.getPath()).toUri().getPath(), + stats[0].getPath().toUri().getPath()); + } + @Test(timeout = 10000) public void testReportChecksumFailure() throws IOException { base.mkdirs(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java index 0f6bf71bded..f0a457b4127 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestPath.java @@ -158,7 +158,43 @@ public class TestPath extends TestCase { assertEquals(new Path("c:/foo"), new Path("d:/bar", "c:/foo")); } } - + + @Test (timeout = 30000) + public void testPathThreeArgContructor() { + assertEquals(new Path("foo"), new Path(null, null, "foo")); + assertEquals(new Path("scheme:///foo"), new Path("scheme", null, "/foo")); + assertEquals( + new Path("scheme://authority/foo"), + new Path("scheme", "authority", "/foo")); + + if (Path.WINDOWS) { + assertEquals(new Path("c:/foo/bar"), new Path(null, null, "c:/foo/bar")); + assertEquals(new Path("c:/foo/bar"), new Path(null, null, 
"/c:/foo/bar")); + } else { + assertEquals(new Path("./a:b"), new Path(null, null, "a:b")); + } + + // Resolution tests + if (Path.WINDOWS) { + assertEquals( + new Path("c:/foo/bar"), + new Path("/fou", new Path(null, null, "c:/foo/bar"))); + assertEquals( + new Path("c:/foo/bar"), + new Path("/fou", new Path(null, null, "/c:/foo/bar"))); + assertEquals( + new Path("/foo/bar"), + new Path("/foo", new Path(null, null, "bar"))); + } else { + assertEquals( + new Path("/foo/bar/a:b"), + new Path("/foo/bar", new Path(null, null, "a:b"))); + assertEquals( + new Path("/a:b"), + new Path("/foo/bar", new Path(null, null, "/a:b"))); + } + } + @Test (timeout = 30000) public void testEquals() { assertFalse(new Path("/").equals(new Path("/foo"))); From 89fb4d8ffd32b06db42cc3e21d2a89e99deb7732 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 30 Aug 2013 01:12:40 +0000 Subject: [PATCH 099/153] YARN-707. Added user information also in the YARN ClientToken so that AMs can implement authorization based on incoming users. Contributed by Jason Lowe. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518868 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../client/ClientToAMTokenIdentifier.java | 14 +++- .../client/ClientToAMTokenSecretManager.java | 4 +- .../resourcemanager/ClientRMService.java | 8 ++- .../recovery/RMStateStore.java | 33 +++++---- .../server/resourcemanager/rmapp/RMApp.java | 4 +- .../resourcemanager/rmapp/RMAppImpl.java | 46 ++++++------ .../rmapp/attempt/RMAppAttempt.java | 15 ++-- .../rmapp/attempt/RMAppAttemptImpl.java | 44 +++++------- .../ClientToAMTokenSecretManagerInRM.java | 13 +++- .../yarn/server/resourcemanager/MockRM.java | 5 ++ .../server/resourcemanager/TestRMRestart.java | 30 +++++--- .../applicationsmanager/MockAsm.java | 3 +- .../recovery/TestRMStateStore.java | 70 +++++++++---------- .../resourcemanager/rmapp/MockRMApp.java | 3 +- .../rmapp/TestRMAppTransitions.java | 4 +- .../security/TestClientToAMTokens.java | 64 ++++++++++++----- 17 files changed, 215 insertions(+), 148 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index c9e3df0f54f..13e52bfe24f 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -39,6 +39,9 @@ Release 2.1.1-beta - UNRELEASED INCOMPATIBLE CHANGES + YARN-707. Added user information also in the YARN ClientToken so that AMs + can implement authorization based on incoming users. (Jason Lowe via vinodkv) + NEW FEATURES IMPROVEMENTS diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java index d9c576eead3..81916bc07e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenIdentifier.java @@ -39,6 +39,7 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { public static final Text KIND_NAME = new Text("YARN_CLIENT_TOKEN"); private ApplicationAttemptId applicationAttemptId; + private Text clientName = new Text(); // TODO: Add more information in the tokenID such that it is not // transferrable, more secure etc. 
@@ -46,21 +47,27 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { public ClientToAMTokenIdentifier() { } - public ClientToAMTokenIdentifier(ApplicationAttemptId id) { + public ClientToAMTokenIdentifier(ApplicationAttemptId id, String client) { this(); this.applicationAttemptId = id; + this.clientName = new Text(client); } public ApplicationAttemptId getApplicationAttemptID() { return this.applicationAttemptId; } + public String getClientName() { + return this.clientName.toString(); + } + @Override public void write(DataOutput out) throws IOException { out.writeLong(this.applicationAttemptId.getApplicationId() .getClusterTimestamp()); out.writeInt(this.applicationAttemptId.getApplicationId().getId()); out.writeInt(this.applicationAttemptId.getAttemptId()); + this.clientName.write(out); } @Override @@ -68,6 +75,7 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { this.applicationAttemptId = ApplicationAttemptId.newInstance( ApplicationId.newInstance(in.readLong(), in.readInt()), in.readInt()); + this.clientName.readFields(in); } @Override @@ -77,10 +85,10 @@ public class ClientToAMTokenIdentifier extends TokenIdentifier { @Override public UserGroupInformation getUser() { - if (this.applicationAttemptId == null) { + if (this.clientName == null) { return null; } - return UserGroupInformation.createRemoteUser(this.applicationAttemptId.toString()); + return UserGroupInformation.createRemoteUser(this.clientName.toString()); } @InterfaceAudience.Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java index 65d854d1664..541f7a88c4e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java @@ -37,7 +37,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; public class ClientToAMTokenSecretManager extends BaseClientToAMTokenSecretManager { - // Only one client-token and one master-key for AM + // Only one master-key for AM private SecretKey masterKey; public ClientToAMTokenSecretManager( @@ -53,7 +53,7 @@ public class ClientToAMTokenSecretManager extends @Override public SecretKey getMasterKey(ApplicationAttemptId applicationAttemptID) { - // Only one client-token and one master-key for AM, just return that. + // Only one master-key for AM, just return that. 
return this.masterKey; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index 81fdd56b833..d2888e77da0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -248,7 +248,8 @@ public class ClientRMService extends AbstractService implements boolean allowAccess = checkAccess(callerUGI, application.getUser(), ApplicationAccessType.VIEW_APP, applicationId); ApplicationReport report = - application.createAndGetApplicationReport(allowAccess); + application.createAndGetApplicationReport(callerUGI.getUserName(), + allowAccess); GetApplicationReportResponse response = recordFactory .newRecordInstance(GetApplicationReportResponse.class); @@ -425,7 +426,8 @@ public class ClientRMService extends AbstractService implements } boolean allowAccess = checkAccess(callerUGI, application.getUser(), ApplicationAccessType.VIEW_APP, application.getApplicationId()); - reports.add(application.createAndGetApplicationReport(allowAccess)); + reports.add(application.createAndGetApplicationReport( + callerUGI.getUserName(), allowAccess)); } GetApplicationsResponse response = @@ -471,7 +473,7 @@ public class ClientRMService extends AbstractService implements apps.size()); for (RMApp app : apps) { if (app.getQueue().equals(queueInfo.getQueueName())) { - appReports.add(app.createAndGetApplicationReport(true)); + appReports.add(app.createAndGetApplicationReport(null, true)); } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index 865e7260f44..179b721bcdf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -24,6 +24,8 @@ import java.util.HashSet; import java.util.Map; import java.util.Set; +import javax.crypto.SecretKey; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -44,7 +46,6 @@ import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; -import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl; import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationStateDataPBImpl; @@ -75,14 +76,14 @@ public abstract class RMStateStore extends AbstractService { public static class ApplicationAttemptState { final ApplicationAttemptId attemptId; final Container masterContainer; - final Credentials appAttemptTokens; + final Credentials appAttemptCredentials; public ApplicationAttemptState(ApplicationAttemptId attemptId, Container masterContainer, - Credentials appAttemptTokens) { + Credentials appAttemptCredentials) { this.attemptId = attemptId; this.masterContainer = masterContainer; - this.appAttemptTokens = appAttemptTokens; + this.appAttemptCredentials = appAttemptCredentials; } public Container getMasterContainer() { @@ -91,8 +92,8 @@ public abstract class RMStateStore extends AbstractService { public ApplicationAttemptId getAttemptId() { return attemptId; } - public Credentials getAppAttemptTokens() { - return appAttemptTokens; + public Credentials getAppAttemptCredentials() { + return appAttemptCredentials; } } @@ -265,7 +266,7 @@ public abstract class RMStateStore extends AbstractService { * RMAppAttemptStoredEvent will be sent on completion to notify the RMAppAttempt */ public synchronized void storeApplicationAttempt(RMAppAttempt appAttempt) { - Credentials credentials = getTokensFromAppAttempt(appAttempt); + Credentials credentials = getCredentialsFromAppAttempt(appAttempt); ApplicationAttemptState attemptState = new ApplicationAttemptState(appAttempt.getAppAttemptId(), @@ -365,7 +366,7 @@ public abstract class RMStateStore extends AbstractService { app.getSubmitTime(), app.getApplicationSubmissionContext(), app.getUser()); for(RMAppAttempt appAttempt : app.getAppAttempts().values()) { - Credentials credentials = getTokensFromAppAttempt(appAttempt); + Credentials credentials = getCredentialsFromAppAttempt(appAttempt); ApplicationAttemptState attemptState = new ApplicationAttemptState(appAttempt.getAppAttemptId(), appAttempt.getMasterContainer(), credentials); @@ -395,17 +396,21 @@ public abstract class RMStateStore extends AbstractService { // YARN-986 public static final Text AM_RM_TOKEN_SERVICE = new Text( "AM_RM_TOKEN_SERVICE"); + + public static final Text AM_CLIENT_TOKEN_MASTER_KEY_NAME = + new Text("YARN_CLIENT_TOKEN_MASTER_KEY"); - private Credentials getTokensFromAppAttempt(RMAppAttempt appAttempt) { + private Credentials getCredentialsFromAppAttempt(RMAppAttempt appAttempt) { Credentials credentials = new Credentials(); Token appToken = appAttempt.getAMRMToken(); if(appToken != null){ credentials.addToken(AM_RM_TOKEN_SERVICE, appToken); } - Token clientToAMToken = - appAttempt.getClientToAMToken(); - if(clientToAMToken != null){ - credentials.addToken(clientToAMToken.getService(), clientToAMToken); + SecretKey clientTokenMasterKey = + appAttempt.getClientTokenMasterKey(); + if(clientTokenMasterKey != null){ + credentials.addSecretKey(AM_CLIENT_TOKEN_MASTER_KEY_NAME, + clientTokenMasterKey.getEncoded()); } return credentials; } @@ -445,7 +450,7 @@ public abstract class RMStateStore extends AbstractService { ((RMStateStoreAppAttemptEvent) event).getAppAttemptState(); Exception storedException = null; - Credentials credentials = attemptState.getAppAttemptTokens(); + Credentials credentials = attemptState.getAppAttemptCredentials(); ByteBuffer appAttemptTokens = null; try { if(credentials != null){ diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java index a4c11541201..f1c496a6c62 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java @@ -128,10 +128,12 @@ public interface RMApp extends EventHandler { *
  *   <li>resource usage report - all values are -1</li>
  • * * + * @param clientUserName the user name of the client requesting the report * @param allowAccess whether to allow full access to the report * @return the {@link ApplicationReport} detailing the status of the application. */ - ApplicationReport createAndGetApplicationReport(boolean allowAccess); + ApplicationReport createAndGetApplicationReport(String clientUserName, + boolean allowAccess); /** * To receive the collection of all {@link RMNode}s whose updates have been diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 79398840ce1..c69aed3473f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp; -import java.io.IOException; import java.util.Collection; import java.util.Collections; import java.util.EnumSet; @@ -411,7 +410,8 @@ public class RMAppImpl implements RMApp, Recoverable { } @Override - public ApplicationReport createAndGetApplicationReport(boolean allowAccess) { + public ApplicationReport createAndGetApplicationReport(String clientUserName, + boolean allowAccess) { this.readLock.lock(); try { @@ -432,15 +432,18 @@ public class RMAppImpl implements RMApp, Recoverable { currentApplicationAttemptId = this.currentAttempt.getAppAttemptId(); trackingUrl = this.currentAttempt.getTrackingUrl(); origTrackingUrl = this.currentAttempt.getOriginalTrackingUrl(); - Token attemptClientToAMToken = - this.currentAttempt.getClientToAMToken(); - if (attemptClientToAMToken != null) { - clientToAMToken = - BuilderUtils.newClientToAMToken( - attemptClientToAMToken.getIdentifier(), - attemptClientToAMToken.getKind().toString(), - attemptClientToAMToken.getPassword(), - attemptClientToAMToken.getService().toString()); + if (UserGroupInformation.isSecurityEnabled() + && clientUserName != null) { + Token attemptClientToAMToken = + new Token( + new ClientToAMTokenIdentifier( + currentApplicationAttemptId, clientUserName), + rmContext.getClientToAMTokenSecretManager()); + clientToAMToken = BuilderUtils.newClientToAMToken( + attemptClientToAMToken.getIdentifier(), + attemptClientToAMToken.getKind().toString(), + attemptClientToAMToken.getPassword(), + attemptClientToAMToken.getService().toString()); } host = this.currentAttempt.getHost(); rpcPort = this.currentAttempt.getRpcPort(); @@ -451,20 +454,15 @@ public class RMAppImpl implements RMApp, Recoverable { if (currentAttempt != null && currentAttempt.getAppAttemptState() == RMAppAttemptState.LAUNCHED) { - try { - if (getApplicationSubmissionContext().getUnmanagedAM() && - getUser().equals(UserGroupInformation.getCurrentUser().getUserName())) { - Token token = currentAttempt.getAMRMToken(); - if (token != null) { - amrmToken = BuilderUtils.newAMRMToken(token.getIdentifier(), - token.getKind().toString(), token.getPassword(), - token.getService().toString()); - } + if (getApplicationSubmissionContext().getUnmanagedAM() && + clientUserName != null && getUser().equals(clientUserName)) { 
+ Token token = currentAttempt.getAMRMToken(); + if (token != null) { + amrmToken = BuilderUtils.newAMRMToken(token.getIdentifier(), + token.getKind().toString(), token.getPassword(), + token.getService().toString()); } - } catch (IOException ex) { - LOG.warn("UserGroupInformation.getCurrentUser() error: " + - ex.toString(), ex); - } + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java index d93332a2190..e9f064d648e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java @@ -21,6 +21,8 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt; import java.util.List; import java.util.Set; +import javax.crypto.SecretKey; + import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; @@ -32,7 +34,6 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; -import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; /** @@ -90,12 +91,6 @@ public interface RMAppAttempt extends EventHandler { */ String getWebProxyBase(); - /** - * The token required by the clients to talk to the application attempt - * @return the token required by the clients to talk to the application attempt - */ - Token getClientToAMToken(); - /** * Diagnostics information for the application attempt. * @return diagnostics information for the application attempt. @@ -154,6 +149,12 @@ public interface RMAppAttempt extends EventHandler { */ Token getAMRMToken(); + /** + * The master key for client-to-AM tokens for this app attempt + * @return The master key for client-to-AM tokens for this app attempt + */ + SecretKey getClientTokenMasterKey(); + /** * Get application container and resource usage information. * @return an ApplicationResourceUsageReport object. 
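The net effect of the RMStateStore and RMAppAttempt changes above is that the RM no longer persists a client-to-AM token per attempt; it persists only the per-attempt master key and re-registers that key with its secret manager on recovery, minting fresh client-to-AM tokens per client user on demand. A minimal sketch of that round trip, using only the methods introduced in this patch (the rmContext and attemptId variables and the surrounding security-enabled check are assumed to be in scope, as in RMAppAttemptImpl):

    // Sketch only, not part of the patch. Assumes UserGroupInformation.isSecurityEnabled()
    // and that rmContext/attemptId are available, as in RMAppAttemptImpl above.
    // At attempt registration: generate the master key and put it in the credentials
    // that the RMStateStore persists with the attempt state.
    SecretKey masterKey = rmContext.getClientToAMTokenSecretManager()
        .registerApplication(attemptId);
    Credentials credentials = new Credentials();
    credentials.addSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME,
        masterKey.getEncoded());

    // On RM restart: read the key back from the recovered credentials and
    // re-register it, so client-to-AM tokens issued before the restart keep validating.
    byte[] keyBytes = credentials.getSecretKey(
        RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME);
    SecretKey recoveredKey = rmContext.getClientToAMTokenSecretManager()
        .registerMasterKey(attemptId, keyBytes);
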
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 1543110db03..00397cfa650 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -33,12 +33,13 @@ import java.util.concurrent.locks.ReentrantReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock.ReadLock; import java.util.concurrent.locks.ReentrantReadWriteLock.WriteLock; +import javax.crypto.SecretKey; + import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpConfig; -import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -60,8 +61,6 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; -import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; -import org.apache.hadoop.yarn.security.client.ClientToAMTokenSelector; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; @@ -126,9 +125,9 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { private final WriteLock writeLock; private final ApplicationAttemptId applicationAttemptId; - private Token clientToAMToken; private final ApplicationSubmissionContext submissionContext; private Token amrmToken = null; + private SecretKey clientTokenMasterKey = null; //nodes on while this attempt's containers ran private final Set ranNodes = @@ -499,8 +498,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { } @Override - public Token getClientToAMToken() { - return this.clientToAMToken; + public SecretKey getClientTokenMasterKey() { + return this.clientTokenMasterKey; } @Override @@ -659,7 +658,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { ApplicationAttemptState attemptState = appState.getAttempt(getAppAttemptId()); assert attemptState != null; setMasterContainer(attemptState.getMasterContainer()); - recoverAppAttemptTokens(attemptState.getAppAttemptTokens()); + recoverAppAttemptCredentials(attemptState.getAppAttemptCredentials()); LOG.info("Recovered attempt: AppId: " + getAppAttemptId().getApplicationId() + " AttemptId: " + getAppAttemptId() + " MasterContainer: " + masterContainer); @@ -668,17 +667,16 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { RMAppAttemptEventType.RECOVER)); } - private void recoverAppAttemptTokens(Credentials appAttemptTokens) { + private void 
recoverAppAttemptCredentials(Credentials appAttemptTokens) { if (appAttemptTokens == null) { return; } - if (UserGroupInformation.isSecurityEnabled()) { - ClientToAMTokenSelector clientToAMTokenSelector = - new ClientToAMTokenSelector(); - this.clientToAMToken = - clientToAMTokenSelector.selectToken(new Text(), - appAttemptTokens.getAllTokens()); + if (UserGroupInformation.isSecurityEnabled()) { + byte[] clientTokenMasterKeyBytes = appAttemptTokens.getSecretKey( + RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME); + clientTokenMasterKey = rmContext.getClientToAMTokenSecretManager() + .registerMasterKey(applicationAttemptId, clientTokenMasterKeyBytes); } // Only one AMRMToken is stored per-attempt, so this should be fine. Can't @@ -715,15 +713,9 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { .registerAppAttempt(appAttempt.applicationAttemptId); if (UserGroupInformation.isSecurityEnabled()) { - - appAttempt.rmContext.getClientToAMTokenSecretManager() - .registerApplication(appAttempt.applicationAttemptId); - - // create clientToAMToken - appAttempt.clientToAMToken = - new Token(new ClientToAMTokenIdentifier( - appAttempt.applicationAttemptId), - appAttempt.rmContext.getClientToAMTokenSecretManager()); + appAttempt.clientTokenMasterKey = appAttempt.rmContext + .getClientToAMTokenSecretManager() + .registerApplication(appAttempt.applicationAttemptId); } // create AMRMToken @@ -762,7 +754,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { message) ); - appAttempt.removeTokens(appAttempt); + appAttempt.removeCredentials(appAttempt); } } @@ -895,7 +887,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.eventHandler.handle(new AppRemovedSchedulerEvent(appAttemptId, finalAttemptState)); - appAttempt.removeTokens(appAttempt); + appAttempt.removeCredentials(appAttempt); } } @@ -1256,7 +1248,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { store.storeApplicationAttempt(this); } - private void removeTokens(RMAppAttemptImpl appAttempt) { + private void removeCredentials(RMAppAttemptImpl appAttempt) { // Unregister from the ClientToAMTokenSecretManager if (UserGroupInformation.isSecurityEnabled()) { appAttempt.rmContext.getClientToAMTokenSecretManager() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java index 23c21e72f05..b5efa9f651a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java @@ -33,9 +33,18 @@ public class ClientToAMTokenSecretManagerInRM extends private Map masterKeys = new HashMap(); - public synchronized void registerApplication( + public synchronized SecretKey registerApplication( ApplicationAttemptId applicationAttemptID) { - this.masterKeys.put(applicationAttemptID, generateSecret()); + SecretKey key = generateSecret(); + this.masterKeys.put(applicationAttemptID, key); + return key; + } + + public 
synchronized SecretKey registerMasterKey( + ApplicationAttemptId applicationAttemptID, byte[] keyData) { + SecretKey key = createSecretKey(keyData); + this.masterKeys.put(applicationAttemptID, key); + return key; } public synchronized void unRegisterApplication( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 522debbb464..3e2a8906a98 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -59,6 +59,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; +import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Level; @@ -387,6 +388,10 @@ public class MockRM extends ResourceManager { return this.rmDTSecretManager; } + public ClientToAMTokenSecretManagerInRM getClientToAMTokenSecretManager() { + return this.clientToAMSecretManager; + } + @Override protected void startWepApp() { // override to disable webapp diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index 7977b30db5a..c0f480bd1c4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -541,16 +541,21 @@ public class TestRMRestart { Assert.assertEquals(BuilderUtils.newContainerId(attemptId1, 1), attemptState.getMasterContainer().getId()); - // the appToken and clientToAMToken that are generated when RMAppAttempt - // is created, + // the appToken and clientTokenMasterKey that are generated when + // RMAppAttempt is created, HashSet> tokenSet = new HashSet>(); tokenSet.add(attempt1.getAMRMToken()); - tokenSet.add(attempt1.getClientToAMToken()); + byte[] clientTokenMasterKey = + attempt1.getClientTokenMasterKey().getEncoded(); - // assert application Token is saved + // assert application credentials are saved + Credentials savedCredentials = attemptState.getAppAttemptCredentials(); HashSet> savedTokens = new HashSet>(); - savedTokens.addAll(attemptState.getAppAttemptTokens().getAllTokens()); + savedTokens.addAll(savedCredentials.getAllTokens()); Assert.assertEquals(tokenSet, savedTokens); + Assert.assertArrayEquals("client token master key not saved", + 
clientTokenMasterKey, savedCredentials.getSecretKey( + RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME)); // start new RM MockRM rm2 = new TestSecurityMockRM(conf, memStore); @@ -564,13 +569,18 @@ public class TestRMRestart { Assert.assertNotNull(loadedAttempt1); savedTokens.clear(); savedTokens.add(loadedAttempt1.getAMRMToken()); - savedTokens.add(loadedAttempt1.getClientToAMToken()); Assert.assertEquals(tokenSet, savedTokens); - // assert clientToAMToken is recovered back to api-versioned - // clientToAMToken - Assert.assertEquals(attempt1.getClientToAMToken(), - loadedAttempt1.getClientToAMToken()); + // assert client token master key is recovered back to api-versioned + // client token master key + Assert.assertEquals("client token master key not restored", + attempt1.getClientTokenMasterKey(), + loadedAttempt1.getClientTokenMasterKey()); + + // assert secret manager also knows about the key + Assert.assertArrayEquals(clientTokenMasterKey, + rm2.getClientToAMTokenSecretManager().getMasterKey(attemptId1) + .getEncoded()); // Not testing ApplicationTokenSecretManager has the password populated back, // that is needed in work-preserving restart diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index 16c4878f9f7..93dbdc6cc7f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -115,7 +115,8 @@ public abstract class MockAsm extends MockApps { throw new UnsupportedOperationException("Not supported yet."); } @Override - public ApplicationReport createAndGetApplicationReport(boolean allowAccess) { + public ApplicationReport createAndGetApplicationReport( + String clientUserName,boolean allowAccess) { throw new UnsupportedOperationException("Not supported yet."); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java index 05916129e3b..aef92d5fdcc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java @@ -18,6 +18,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; +import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; @@ -25,12 +26,12 @@ import static org.junit.Assert.fail; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; -import java.util.ArrayList; 
import java.util.HashMap; import java.util.HashSet; -import java.util.List; import java.util.Map; +import javax.crypto.SecretKey; + import junit.framework.Assert; import org.apache.commons.logging.Log; @@ -55,7 +56,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; -import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; @@ -198,7 +198,7 @@ public class TestRMStateStore { ContainerId storeAttempt(RMStateStore store, ApplicationAttemptId attemptId, String containerIdStr, Token appToken, - Token clientToAMToken, TestDispatcher dispatcher) + SecretKey clientTokenMasterKey, TestDispatcher dispatcher) throws Exception { Container container = new ContainerPBImpl(); @@ -207,7 +207,8 @@ public class TestRMStateStore { when(mockAttempt.getAppAttemptId()).thenReturn(attemptId); when(mockAttempt.getMasterContainer()).thenReturn(container); when(mockAttempt.getAMRMToken()).thenReturn(appToken); - when(mockAttempt.getClientToAMToken()).thenReturn(clientToAMToken); + when(mockAttempt.getClientTokenMasterKey()) + .thenReturn(clientTokenMasterKey); dispatcher.attemptId = attemptId; dispatcher.storedException = null; store.storeApplicationAttempt(mockAttempt); @@ -215,7 +216,6 @@ public class TestRMStateStore { return container.getId(); } - @SuppressWarnings("unchecked") void testRMAppStateStore(RMStateStoreHelper stateStoreHelper) throws Exception { long submitTime = System.currentTimeMillis(); Configuration conf = new YarnConfiguration(); @@ -233,33 +233,33 @@ public class TestRMStateStore { ApplicationId appId1 = attemptId1.getApplicationId(); storeApp(store, appId1, submitTime); - // create application token1 for attempt1 - List> appAttemptToken1 = - generateTokens(attemptId1, appTokenMgr, clientToAMTokenMgr, conf); + // create application token and client token key for attempt1 + Token appAttemptToken1 = + generateAMRMToken(attemptId1, appTokenMgr); HashSet> attemptTokenSet1 = new HashSet>(); - attemptTokenSet1.addAll(appAttemptToken1); + attemptTokenSet1.add(appAttemptToken1); + SecretKey clientTokenKey1 = + clientToAMTokenMgr.registerApplication(attemptId1); ContainerId containerId1 = storeAttempt(store, attemptId1, "container_1352994193343_0001_01_000001", - (Token) (appAttemptToken1.get(0)), - (Token)(appAttemptToken1.get(1)), - dispatcher); + appAttemptToken1, clientTokenKey1, dispatcher); String appAttemptIdStr2 = "appattempt_1352994193343_0001_000002"; ApplicationAttemptId attemptId2 = ConverterUtils.toApplicationAttemptId(appAttemptIdStr2); - // create application token2 for attempt2 - List> appAttemptToken2 = - generateTokens(attemptId2, appTokenMgr, clientToAMTokenMgr, conf); + // create application token and client token key for attempt2 + Token appAttemptToken2 = + generateAMRMToken(attemptId2, appTokenMgr); HashSet> attemptTokenSet2 = new HashSet>(); - attemptTokenSet2.addAll(appAttemptToken2); + attemptTokenSet2.add(appAttemptToken2); + SecretKey clientTokenKey2 = + clientToAMTokenMgr.registerApplication(attemptId2); ContainerId containerId2 = storeAttempt(store, attemptId2, "container_1352994193343_0001_02_000001", - (Token) 
(appAttemptToken2.get(0)), - (Token)(appAttemptToken2.get(1)), - dispatcher); + appAttemptToken2, clientTokenKey2, dispatcher); ApplicationAttemptId attemptIdRemoved = ConverterUtils .toApplicationAttemptId("appattempt_1352994193343_0002_000001"); @@ -306,8 +306,12 @@ public class TestRMStateStore { assertEquals(containerId1, attemptState.getMasterContainer().getId()); // attempt1 applicationToken is loaded correctly HashSet> savedTokens = new HashSet>(); - savedTokens.addAll(attemptState.getAppAttemptTokens().getAllTokens()); + savedTokens.addAll(attemptState.getAppAttemptCredentials().getAllTokens()); assertEquals(attemptTokenSet1, savedTokens); + // attempt1 client token master key is loaded correctly + assertArrayEquals(clientTokenKey1.getEncoded(), + attemptState.getAppAttemptCredentials() + .getSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME)); attemptState = appState.getAttempt(attemptId2); // attempt2 is loaded correctly @@ -317,8 +321,12 @@ public class TestRMStateStore { assertEquals(containerId2, attemptState.getMasterContainer().getId()); // attempt2 applicationToken is loaded correctly savedTokens.clear(); - savedTokens.addAll(attemptState.getAppAttemptTokens().getAllTokens()); + savedTokens.addAll(attemptState.getAppAttemptCredentials().getAllTokens()); assertEquals(attemptTokenSet2, savedTokens); + // attempt2 client token master key is loaded correctly + assertArrayEquals(clientTokenKey2.getEncoded(), + attemptState.getAppAttemptCredentials() + .getSecretKey(RMStateStore.AM_CLIENT_TOKEN_MASTER_KEY_NAME)); // assert store is in expected state after everything is cleaned assertTrue(stateStoreHelper.isFinalStateValid()); @@ -357,24 +365,14 @@ public class TestRMStateStore { Assert.assertEquals(sequenceNumber, secretManagerState.getDTSequenceNumber()); } - private List> generateTokens(ApplicationAttemptId attemptId, - AMRMTokenSecretManager appTokenMgr, - ClientToAMTokenSecretManagerInRM clientToAMTokenMgr, Configuration conf) { + private Token generateAMRMToken( + ApplicationAttemptId attemptId, + AMRMTokenSecretManager appTokenMgr) { AMRMTokenIdentifier appTokenId = new AMRMTokenIdentifier(attemptId); Token appToken = new Token(appTokenId, appTokenMgr); appToken.setService(new Text("appToken service")); - - ClientToAMTokenIdentifier clientToAMTokenId = - new ClientToAMTokenIdentifier(attemptId); - clientToAMTokenMgr.registerApplication(attemptId); - Token clientToAMToken = - new Token(clientToAMTokenId, clientToAMTokenMgr); - clientToAMToken.setService(new Text("clientToAMToken service")); - List> tokenPair = new ArrayList>(); - tokenPair.add(0, appToken); - tokenPair.add(1, clientToAMToken); - return tokenPair; + return appToken; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java index 3261dd43651..9c7a969919d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java @@ -143,7 +143,8 @@ public class MockRMApp implements RMApp { } @Override - public ApplicationReport 
createAndGetApplicationReport(boolean allowAccess) { + public ApplicationReport createAndGetApplicationReport( + String clientUserName, boolean allowAccess) { throw new UnsupportedOperationException("Not supported yet."); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index a4b74049f43..d6bd3f6a0c1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -726,7 +726,9 @@ public class TestRMAppTransitions { public void testGetAppReport() { RMApp app = createNewTestApp(null); assertAppState(RMAppState.NEW, app); - ApplicationReport report = app.createAndGetApplicationReport(true); + ApplicationReport report = app.createAndGetApplicationReport(null, true); + Assert.assertNotNull(report.getApplicationResourceUsageReport()); + report = app.createAndGetApplicationReport("clientuser", true); Assert.assertNotNull(report.getApplicationResourceUsageReport()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java index fc2fda85202..97a3c268b09 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientToAMTokens.java @@ -115,7 +115,6 @@ public class TestClientToAMTokens { private final byte[] secretKey; private InetSocketAddress address; private boolean pinged = false; - private ClientToAMTokenSecretManager secretManager; public CustomAM(ApplicationAttemptId appId, byte[] secretKey) { super("CustomAM"); @@ -132,12 +131,14 @@ public class TestClientToAMTokens { protected void serviceStart() throws Exception { Configuration conf = getConfig(); - secretManager = new ClientToAMTokenSecretManager(this.appAttemptId, secretKey); Server server; try { server = - new RPC.Builder(conf).setProtocol(CustomProtocol.class) - .setNumHandlers(1).setSecretManager(secretManager) + new RPC.Builder(conf) + .setProtocol(CustomProtocol.class) + .setNumHandlers(1) + .setSecretManager( + new ClientToAMTokenSecretManager(this.appAttemptId, secretKey)) .setInstance(this).build(); } catch (Exception e) { throw new YarnRuntimeException(e); @@ -146,14 +147,10 @@ public class TestClientToAMTokens { this.address = NetUtils.getConnectAddress(server); super.serviceStart(); } - - public ClientToAMTokenSecretManager getClientToAMTokenSecretManager() { - return this.secretManager; - } } @Test - public void testClientToAMs() throws Exception { + public void testClientToAMTokens() 
throws Exception { final Configuration conf = new Configuration(); conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, @@ -204,7 +201,7 @@ public class TestClientToAMTokens { GetApplicationReportResponse reportResponse = rm.getClientRMService().getApplicationReport(request); ApplicationReport appReport = reportResponse.getApplicationReport(); - org.apache.hadoop.yarn.api.records.Token clientToAMToken = + org.apache.hadoop.yarn.api.records.Token originalClientToAMToken = appReport.getClientToAMToken(); ApplicationAttemptId appAttempt = app.getCurrentAppAttempt().getAppAttemptId(); @@ -259,17 +256,47 @@ public class TestClientToAMTokens { Assert.assertFalse(am.pinged); } - // Verify denial for a malicious user - UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); Token token = - ConverterUtils.convertFromYarn(clientToAMToken, am.address); + ConverterUtils.convertFromYarn(originalClientToAMToken, am.address); + // Verify denial for a malicious user with tampered ID + verifyTokenWithTamperedID(conf, am, token); + + // Verify denial for a malicious user with tampered user-name + verifyTokenWithTamperedUserName(conf, am, token); + + // Now for an authenticated user + verifyValidToken(conf, am, token); + } + + private void verifyTokenWithTamperedID(final Configuration conf, + final CustomAM am, Token token) + throws IOException { // Malicious user, messes with appId + UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); ClientToAMTokenIdentifier maliciousID = new ClientToAMTokenIdentifier(BuilderUtils.newApplicationAttemptId( - BuilderUtils.newApplicationId(app.getApplicationId() - .getClusterTimestamp(), 42), 43)); + BuilderUtils.newApplicationId(am.appAttemptId.getApplicationId() + .getClusterTimestamp(), 42), 43), UserGroupInformation + .getCurrentUser().getShortUserName()); + verifyTamperedToken(conf, am, token, ugi, maliciousID); + } + + private void verifyTokenWithTamperedUserName(final Configuration conf, + final CustomAM am, Token token) + throws IOException { + // Malicious user, messes with appId + UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); + ClientToAMTokenIdentifier maliciousID = + new ClientToAMTokenIdentifier(am.appAttemptId, "evilOrc"); + + verifyTamperedToken(conf, am, token, ugi, maliciousID); + } + + private void verifyTamperedToken(final Configuration conf, final CustomAM am, + Token token, UserGroupInformation ugi, + ClientToAMTokenIdentifier maliciousID) { Token maliciousToken = new Token(maliciousID.getBytes(), token.getPassword(), token.getKind(), @@ -309,8 +336,12 @@ public class TestClientToAMTokens { + "Mismatched response.")); Assert.assertFalse(am.pinged); } + } - // Now for an authenticated user + private void verifyValidToken(final Configuration conf, final CustomAM am, + Token token) throws IOException, + InterruptedException { + UserGroupInformation ugi; ugi = UserGroupInformation.createRemoteUser("me"); ugi.addToken(token); @@ -326,5 +357,4 @@ public class TestClientToAMTokens { } }); } - } From daa0713eb0661026790de69e4241aa00df207eea Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 30 Aug 2013 07:01:47 +0000 Subject: [PATCH 100/153] HDFS-5144. Document time unit to NameNodeMetrics. Contributed by Akira Ajisaka. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518895 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/namenode/metrics/NameNodeMetrics.java | 6 ++++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 84100e18c4d..84898cd29bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -120,6 +120,9 @@ Trunk (Unreleased) HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth) + HDFS-5144. Document time unit to NameNodeMetrics. (Akira Ajisaka via + suresh) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java index d02186d34b6..9aac83ddaf3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java @@ -80,8 +80,10 @@ public class NameNodeMetrics { @Metric("Block report") MutableRate blockReport; MutableQuantiles[] blockReportQuantiles; - @Metric("Duration in SafeMode at startup") MutableGaugeInt safeModeTime; - @Metric("Time loading FS Image at startup") MutableGaugeInt fsImageLoadTime; + @Metric("Duration in SafeMode at startup in msec") + MutableGaugeInt safeModeTime; + @Metric("Time loading FS Image at startup in msec") + MutableGaugeInt fsImageLoadTime; NameNodeMetrics(String processName, String sessionId, int[] intervals) { registry.tag(ProcessName, processName).tag(SessionId, sessionId); From 25d4c2fd53ec435af39616b8675d956192c166b0 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 30 Aug 2013 07:36:45 +0000 Subject: [PATCH 101/153] HDFS-5140. Too many safemode monitor threads being created in the standby namenode causing it to fail with out of memory error. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518899 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../hdfs/server/namenode/FSNamesystem.java | 18 +++++++++++------- 2 files changed, 14 insertions(+), 7 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 84898cd29bb..2eacaaffb1b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -407,6 +407,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5077. NPE in FSNamesystem.commitBlockSynchronization(). (Plamen Jeliazkov via shv) + HDFS-5140. Too many safemode monitor threads being created in the standby + namenode causing it to fail with out of memory error. 
(jing9) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index a397ce94fd1..0ed66324e45 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -366,7 +366,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, final LeaseManager leaseManager = new LeaseManager(this); - Daemon smmthread = null; // SafeModeMonitor thread + volatile Daemon smmthread = null; // SafeModeMonitor thread Daemon nnrmthread = null; // NamenodeResourceMonitor thread @@ -4555,7 +4555,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats, // Have to have write-lock since leaving safemode initializes // repl queues, which requires write lock assert hasWriteLock(); - if (needEnter()) { + // if smmthread is already running, the block threshold must have been + // reached before, there is no need to enter the safe mode again + if (smmthread == null && needEnter()) { enter(); // check if we are ready to initialize replication queues if (canInitializeReplQueues() && !isPopulatingReplQueues()) { @@ -4564,7 +4566,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, reportStatus("STATE* Safe mode ON.", false); return; } - // the threshold is reached + // the threshold is reached or was reached before if (!isOn() || // safe mode is off extension <= 0 || threshold <= 0) { // don't need to wait this.leave(); // leave safe mode @@ -4576,9 +4578,11 @@ public class FSNamesystem implements Namesystem, FSClusterStats, } // start monitor reached = now(); - smmthread = new Daemon(new SafeModeMonitor()); - smmthread.start(); - reportStatus("STATE* Safe mode extension entered.", true); + if (smmthread == null) { + smmthread = new Daemon(new SafeModeMonitor()); + smmthread.start(); + reportStatus("STATE* Safe mode extension entered.", true); + } // check if we are ready to initialize replication queues if (canInitializeReplQueues() && !isPopulatingReplQueues()) { @@ -4814,6 +4818,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (safeMode.canLeave()) { // Leave safe mode. safeMode.leave(); + smmthread = null; break; } } finally { @@ -4829,7 +4834,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats, if (!fsRunning) { LOG.info("NameNode is being shutdown, exit SafeModeMonitor thread"); } - smmthread = null; } } From 28ac26bc43cb574c94538527326a26e1aee6c113 Mon Sep 17 00:00:00 2001 From: Suresh Srinivas Date: Fri, 30 Aug 2013 08:07:54 +0000 Subject: [PATCH 102/153] HADOOP-5144. Move change from 3.0 to 2.3.0 release section git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1518907 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2eacaaffb1b..068f7003175 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -120,9 +120,6 @@ Trunk (Unreleased) HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth) - HDFS-5144. Document time unit to NameNodeMetrics. 
(Akira Ajisaka via - suresh) - OPTIMIZATIONS BUG FIXES @@ -267,6 +264,9 @@ Release 2.3.0 - UNRELEASED HDFS-4994. Audit log getContentSummary() calls. (Robert Parker via kihwal) + HDFS-5144. Document time unit to NameNodeMetrics. (Akira Ajisaka via + suresh) + OPTIMIZATIONS BUG FIXES From bafd302208fbbbf1f2dccfc969a71d862cc8ce67 Mon Sep 17 00:00:00 2001 From: Bikas Saha Date: Fri, 30 Aug 2013 22:58:41 +0000 Subject: [PATCH 103/153] YARN-771. AMRMClient support for resource blacklisting (Junping Du via bikas) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519107 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../hadoop/yarn/client/api/AMRMClient.java | 11 ++ .../yarn/client/api/impl/AMRMClientImpl.java | 55 ++++++- .../yarn/client/api/impl/TestAMRMClient.java | 142 ++++++++++++++++++ .../pb/ResourceBlacklistRequestPBImpl.java | 4 +- 5 files changed, 210 insertions(+), 5 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 13e52bfe24f..c3a696c15f9 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -69,6 +69,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1080. Improved help message for "yarn logs" command. (Xuan Gong via vinodkv) + YARN-771. AMRMClient support for resource blacklisting (Junping Du via + bikas) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java index 9a0caad4c2b..f4913cdbda5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/AMRMClient.java @@ -286,4 +286,15 @@ public abstract class AMRMClient extends Priority priority, String resourceName, Resource capability); + + /** + * Update application's blacklist with addition or removal resources. 
+ * + * @param blacklistAdditions list of resources which should be added to the + * application blacklist + * @param blacklistRemovals list of resources which should be removed from the + * application blacklist + */ + public abstract void updateBlacklist(List blacklistAdditions, + List blacklistRemovals); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 99e896e5f57..c433b55b6ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -51,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NMToken; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.client.ClientRMProxy; import org.apache.hadoop.yarn.client.api.AMRMClient; @@ -80,6 +81,9 @@ public class AMRMClientImpl extends AMRMClient { protected Resource clusterAvailableResources; protected int clusterNodeCount; + protected final Set blacklistAdditions = new HashSet(); + protected final Set blacklistRemovals = new HashSet(); + class ResourceRequestInfo { ResourceRequest remoteRequest; LinkedHashSet containerRequests; @@ -199,9 +203,11 @@ public class AMRMClientImpl extends AMRMClient { Preconditions.checkArgument(progressIndicator >= 0, "Progress indicator should not be negative"); AllocateResponse allocateResponse = null; - ArrayList askList = null; - ArrayList releaseList = null; + List askList = null; + List releaseList = null; AllocateRequest allocateRequest = null; + List blacklistToAdd = new ArrayList(); + List blacklistToRemove = new ArrayList(); try { synchronized (this) { @@ -217,9 +223,22 @@ public class AMRMClientImpl extends AMRMClient { // optimistically clear this collection assuming no RPC failure ask.clear(); release.clear(); + + blacklistToAdd.addAll(blacklistAdditions); + blacklistToRemove.addAll(blacklistRemovals); + + ResourceBlacklistRequest blacklistRequest = + (blacklistToAdd != null) || (blacklistToRemove != null) ? 
+ ResourceBlacklistRequest.newInstance(blacklistToAdd, + blacklistToRemove) : null; + allocateRequest = AllocateRequest.newInstance(lastResponseId, progressIndicator, - askList, releaseList, null); + askList, releaseList, blacklistRequest); + // clear blacklistAdditions and blacklistRemovals before + // unsynchronized part + blacklistAdditions.clear(); + blacklistRemovals.clear(); } allocateResponse = rmClient.allocate(allocateRequest); @@ -253,6 +272,9 @@ public class AMRMClientImpl extends AMRMClient { ask.add(oldAsk); } } + + blacklistAdditions.addAll(blacklistToAdd); + blacklistRemovals.addAll(blacklistToRemove); } } } @@ -604,4 +626,31 @@ public class AMRMClientImpl extends AMRMClient { + " #asks=" + ask.size()); } } + + @Override + public synchronized void updateBlacklist(List blacklistAdditions, + List blacklistRemovals) { + + if (blacklistAdditions != null) { + this.blacklistAdditions.addAll(blacklistAdditions); + // if some resources are also in blacklistRemovals updated before, we + // should remove them here. + this.blacklistRemovals.removeAll(blacklistAdditions); + } + + if (blacklistRemovals != null) { + this.blacklistRemovals.addAll(blacklistRemovals); + // if some resources are in blacklistAdditions before, we should remove + // them here. + this.blacklistAdditions.removeAll(blacklistRemovals); + } + + if (blacklistAdditions != null && blacklistRemovals != null + && blacklistAdditions.removeAll(blacklistRemovals)) { + // we allow resources to appear in addition list and removal list in the + // same invocation of updateBlacklist(), but should get a warn here. + LOG.warn("The same resources appear in both blacklistAdditions and " + + "blacklistRemovals in updateBlacklist."); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java index f4a6dd3f3c8..f24a2cd88fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java @@ -18,13 +18,16 @@ package org.apache.hadoop.yarn.client.api.impl; +import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; import static org.mockito.Matchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; import java.nio.ByteBuffer; +import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; import java.util.Collections; @@ -97,6 +100,7 @@ public class TestAMRMClient { static String rack; static String[] nodes; static String[] racks; + private final static int DEFAULT_ITERATION = 3; @BeforeClass public static void setup() throws Exception { @@ -476,6 +480,144 @@ public class TestAMRMClient { } } } + + @Test (timeout=60000) + public void testAllocationWithBlacklist() throws YarnException, IOException { + AMRMClientImpl amClient = null; + try { + // start am rm client + amClient = + (AMRMClientImpl) AMRMClient + . 
createAMRMClient(); + amClient.init(conf); + amClient.start(); + amClient.registerApplicationMaster("Host", 10000, ""); + + assertTrue(amClient.ask.size() == 0); + assertTrue(amClient.release.size() == 0); + + ContainerRequest storedContainer1 = + new ContainerRequest(capability, nodes, racks, priority); + amClient.addContainerRequest(storedContainer1); + assertTrue(amClient.ask.size() == 3); + assertTrue(amClient.release.size() == 0); + + List localNodeBlacklist = new ArrayList(); + localNodeBlacklist.add(node); + + // put node in black list, so no container assignment + amClient.updateBlacklist(localNodeBlacklist, null); + + int allocatedContainerCount = getAllocatedContainersNumber(amClient, + DEFAULT_ITERATION); + // the only node is in blacklist, so no allocation + assertTrue(allocatedContainerCount == 0); + + // Remove node from blacklist, so get assigned with 2 + amClient.updateBlacklist(null, localNodeBlacklist); + ContainerRequest storedContainer2 = + new ContainerRequest(capability, nodes, racks, priority); + amClient.addContainerRequest(storedContainer2); + allocatedContainerCount = getAllocatedContainersNumber(amClient, + DEFAULT_ITERATION); + assertEquals(allocatedContainerCount, 2); + + // Test in case exception in allocate(), blacklist is kept + assertTrue(amClient.blacklistAdditions.isEmpty()); + assertTrue(amClient.blacklistRemovals.isEmpty()); + + // create a invalid ContainerRequest - memory value is minus + ContainerRequest invalidContainerRequest = + new ContainerRequest(Resource.newInstance(-1024, 1), + nodes, racks, priority); + amClient.addContainerRequest(invalidContainerRequest); + amClient.updateBlacklist(localNodeBlacklist, null); + try { + // allocate() should complain as ContainerRequest is invalid. + amClient.allocate(0.1f); + fail("there should be an exception here."); + } catch (Exception e) { + assertEquals(amClient.blacklistAdditions.size(), 1); + } + } finally { + if (amClient != null && amClient.getServiceState() == STATE.STARTED) { + amClient.stop(); + } + } + } + + @Test (timeout=60000) + public void testAMRMClientWithBlacklist() throws YarnException, IOException { + AMRMClientImpl amClient = null; + try { + // start am rm client + amClient = + (AMRMClientImpl) AMRMClient + . createAMRMClient(); + amClient.init(conf); + amClient.start(); + amClient.registerApplicationMaster("Host", 10000, ""); + String[] nodes = {"node1", "node2", "node3"}; + + // Add nodes[0] and nodes[1] + List nodeList01 = new ArrayList(); + nodeList01.add(nodes[0]); + nodeList01.add(nodes[1]); + amClient.updateBlacklist(nodeList01, null); + assertEquals(amClient.blacklistAdditions.size(),2); + assertEquals(amClient.blacklistRemovals.size(),0); + + // Add nodes[0] again, verify it is not added duplicated. + List nodeList02 = new ArrayList(); + nodeList02.add(nodes[0]); + nodeList02.add(nodes[2]); + amClient.updateBlacklist(nodeList02, null); + assertEquals(amClient.blacklistAdditions.size(),3); + assertEquals(amClient.blacklistRemovals.size(),0); + + // Add nodes[1] and nodes[2] to removal list, + // Verify addition list remove these two nodes. + List nodeList12 = new ArrayList(); + nodeList12.add(nodes[1]); + nodeList12.add(nodes[2]); + amClient.updateBlacklist(null, nodeList12); + assertEquals(amClient.blacklistAdditions.size(),1); + assertEquals(amClient.blacklistRemovals.size(),2); + + // Add nodes[1] again to addition list, + // Verify removal list will remove this node. 
+ List nodeList1 = new ArrayList(); + nodeList1.add(nodes[1]); + amClient.updateBlacklist(nodeList1, null); + assertEquals(amClient.blacklistAdditions.size(),2); + assertEquals(amClient.blacklistRemovals.size(),1); + } finally { + if (amClient != null && amClient.getServiceState() == STATE.STARTED) { + amClient.stop(); + } + } + } + + private int getAllocatedContainersNumber( + AMRMClientImpl amClient, int iterationsLeft) + throws YarnException, IOException { + int allocatedContainerCount = 0; + while (iterationsLeft-- > 0) { + Log.info(" == alloc " + allocatedContainerCount + " it left " + iterationsLeft); + AllocateResponse allocResponse = amClient.allocate(0.1f); + assertTrue(amClient.ask.size() == 0); + assertTrue(amClient.release.size() == 0); + + assertTrue(nodeCount == amClient.getClusterNodeCount()); + allocatedContainerCount += allocResponse.getAllocatedContainers().size(); + + if(allocatedContainerCount == 0) { + // sleep to let NM's heartbeat to RM and trigger allocations + sleep(100); + } + } + return allocatedContainerCount; + } @Test (timeout=60000) public void testAMRMClient() throws YarnException, IOException { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java index 96cb139925c..743e5d12c3f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourceBlacklistRequestPBImpl.java @@ -125,7 +125,7 @@ public class ResourceBlacklistRequestPBImpl extends ResourceBlacklistRequest { @Override public void setBlacklistAdditions(List resourceNames) { - if (resourceNames == null) { + if (resourceNames == null || resourceNames.isEmpty()) { if (this.blacklistAdditions != null) { this.blacklistAdditions.clear(); } @@ -144,7 +144,7 @@ public class ResourceBlacklistRequestPBImpl extends ResourceBlacklistRequest { @Override public void setBlacklistRemovals(List resourceNames) { - if (resourceNames == null) { + if (resourceNames == null || resourceNames.isEmpty()) { if (this.blacklistRemovals != null) { this.blacklistRemovals.clear(); } From 6d100eb79409cbca70a22ada705dedf6658545e3 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 30 Aug 2013 23:26:01 +0000 Subject: [PATCH 104/153] YARN-1117. Improved help messages for "yarn application" and "yarn node" commands. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519117 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../yarn/client/cli/ApplicationCLI.java | 33 +++++--- .../hadoop/yarn/client/cli/NodeCLI.java | 20 +++-- .../yarn/client/api/impl/TestYarnClient.java | 1 + .../hadoop/yarn/client/cli/TestYarnCLI.java | 83 ++++++++++++++++++- .../hadoop/yarn/util/ConverterUtils.java | 3 +- 6 files changed, 123 insertions(+), 20 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index c3a696c15f9..05206aedbe0 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -72,6 +72,9 @@ Release 2.1.1-beta - UNRELEASED YARN-771. AMRMClient support for resource blacklisting (Junping Du via bikas) + YARN-1117. 
Improved help messages for "yarn application" and "yarn node" + commands. (Xuan Gong via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index 69de37a76c4..272c0bf6044 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -29,6 +29,7 @@ import java.util.Set; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.hadoop.classification.InterfaceAudience.Private; @@ -70,32 +71,38 @@ public class ApplicationCLI extends YarnCLI { Options opts = new Options(); opts.addOption(STATUS_CMD, true, "Prints the status of the application."); opts.addOption(LIST_CMD, false, "List applications from the RM. " + - "Supports optional use of --appTypes to filter applications " + + "Supports optional use of -appTypes to filter applications " + "based on application type, " + - "and --appStates to filter applications based on application state"); + "and -appStates to filter applications based on application state"); opts.addOption(KILL_CMD, true, "Kills the application."); opts.addOption(HELP_CMD, false, "Displays help for all commands."); - Option appTypeOpt = new Option(APP_TYPE_CMD, true, - "Works with --list to filter applications based on their type."); + Option appTypeOpt = new Option(APP_TYPE_CMD, true, "Works with -list to " + + "filter applications based on " + + "input comma-separated list of application types."); appTypeOpt.setValueSeparator(','); appTypeOpt.setArgs(Option.UNLIMITED_VALUES); - appTypeOpt.setArgName("Comma-separated list of application types"); + appTypeOpt.setArgName("Types"); opts.addOption(appTypeOpt); - Option appStateOpt = - new Option( - APP_STATE_CMD, - true, - "Works with --list to filter applications based on their state. " - + getAllValidApplicationStates()); + Option appStateOpt = new Option(APP_STATE_CMD, true, "Works with -list " + + "to filter applications based on input comma-separated list of " + + "application states. 
" + getAllValidApplicationStates()); appStateOpt.setValueSeparator(','); appStateOpt.setArgs(Option.UNLIMITED_VALUES); - appStateOpt.setArgName("Comma-separated list of application states"); + appStateOpt.setArgName("States"); opts.addOption(appStateOpt); opts.getOption(KILL_CMD).setArgName("Application ID"); opts.getOption(STATUS_CMD).setArgName("Application ID"); - CommandLine cliParser = new GnuParser().parse(opts, args); int exitCode = -1; + CommandLine cliParser = null; + try { + cliParser = new GnuParser().parse(opts, args); + } catch (MissingArgumentException ex) { + sysout.println("Missing argument for options"); + printUsage(opts); + return exitCode; + } + if (cliParser.hasOption(STATUS_CMD)) { if (args.length != 2) { printUsage(opts); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java index c62b4d40598..f77c56f927a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java @@ -28,6 +28,7 @@ import java.util.Set; import org.apache.commons.cli.CommandLine; import org.apache.commons.cli.GnuParser; import org.apache.commons.cli.HelpFormatter; +import org.apache.commons.cli.MissingArgumentException; import org.apache.commons.cli.Option; import org.apache.commons.cli.Options; import org.apache.commons.lang.time.DateFormatUtils; @@ -64,20 +65,29 @@ public class NodeCLI extends YarnCLI { Options opts = new Options(); opts.addOption(STATUS_CMD, true, "Prints the status report of the node."); opts.addOption(LIST_CMD, false, "List all running nodes. 
" + - "Supports optional use of --states to filter nodes " + - "based on node state, all --all to list all nodes."); + "Supports optional use of -states to filter nodes " + + "based on node state, all -all to list all nodes."); Option nodeStateOpt = new Option(NODE_STATE_CMD, true, - "Works with -list to filter nodes based on their states."); + "Works with -list to filter nodes based on input comma-separated list of node states."); nodeStateOpt.setValueSeparator(','); nodeStateOpt.setArgs(Option.UNLIMITED_VALUES); - nodeStateOpt.setArgName("Comma-separated list of node states"); + nodeStateOpt.setArgName("States"); opts.addOption(nodeStateOpt); Option allOpt = new Option(NODE_ALL, false, "Works with -list to list all nodes."); opts.addOption(allOpt); - CommandLine cliParser = new GnuParser().parse(opts, args); + opts.getOption(STATUS_CMD).setArgName("NodeId"); int exitCode = -1; + CommandLine cliParser = null; + try { + cliParser = new GnuParser().parse(opts, args); + } catch (MissingArgumentException ex) { + sysout.println("Missing argument for options"); + printUsage(opts); + return exitCode; + } + if (cliParser.hasOption("status")) { if (args.length != 2) { printUsage(opts); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index e7a66bd28ef..826433d5048 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -86,6 +86,7 @@ public class TestYarnClient { client.init(conf); client.start(); client.stop(); + rm.stop(); } @Test (timeout = 30000) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index c6b49465e1d..670ecbc674d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -29,6 +29,7 @@ import static org.mockito.Mockito.when; import static org.mockito.Mockito.doThrow; import java.io.ByteArrayOutputStream; +import java.io.IOException; import java.io.PrintStream; import java.io.PrintWriter; import java.util.ArrayList; @@ -72,6 +73,7 @@ public class TestYarnCLI { sysOut = spy(new PrintStream(sysOutStream)); sysErrStream = new ByteArrayOutputStream(); sysErr = spy(new PrintStream(sysErrStream)); + System.setOut(sysOut); } @Test @@ -456,21 +458,40 @@ public class TestYarnCLI { } @Test (timeout = 10000) - public void testHelpCommand() throws Exception { + public void testAppsHelpCommand() throws Exception { ApplicationCLI cli = createAndGetAppCLI(); ApplicationCLI spyCli = spy(cli); int result = spyCli.run(new String[] { "-help" }); Assert.assertTrue(result == 0); verify(spyCli).printUsage(any(Options.class)); + Assert.assertEquals(createApplicationCLIHelpMessage(), + sysOutStream.toString()); + sysOutStream.reset(); ApplicationId applicationId = ApplicationId.newInstance(1234, 5); result = cli.run(new String[] { "-kill", applicationId.toString(), "args" }); 
verify(spyCli).printUsage(any(Options.class)); + Assert.assertEquals(createApplicationCLIHelpMessage(), + sysOutStream.toString()); + sysOutStream.reset(); NodeId nodeId = NodeId.newInstance("host0", 0); result = cli.run(new String[] { "-status", nodeId.toString(), "args" }); verify(spyCli).printUsage(any(Options.class)); + Assert.assertEquals(createApplicationCLIHelpMessage(), + sysOutStream.toString()); + } + + @Test (timeout = 5000) + public void testNodesHelpCommand() throws Exception { + NodeCLI nodeCLI = new NodeCLI(); + nodeCLI.setClient(client); + nodeCLI.setSysOutPrintStream(sysOut); + nodeCLI.setSysErrPrintStream(sysErr); + nodeCLI.run(new String[] {}); + Assert.assertEquals(createNodeCLIHelpMessage(), + sysOutStream.toString()); } @Test @@ -806,6 +827,25 @@ public class TestYarnCLI { verifyUsageInfo(new NodeCLI()); } + @Test + public void testMissingArguments() throws Exception { + ApplicationCLI cli = createAndGetAppCLI(); + int result = cli.run(new String[] { "-status" }); + Assert.assertEquals(result, -1); + Assert.assertEquals("Missing argument for options\n" + + createApplicationCLIHelpMessage(), sysOutStream.toString()); + + sysOutStream.reset(); + NodeCLI nodeCLI = new NodeCLI(); + nodeCLI.setClient(client); + nodeCLI.setSysOutPrintStream(sysOut); + nodeCLI.setSysErrPrintStream(sysErr); + result = nodeCLI.run(new String[] { "-status" }); + Assert.assertEquals(result, -1); + Assert.assertEquals("Missing argument for options\n" + + createNodeCLIHelpMessage(), sysOutStream.toString()); + } + private void verifyUsageInfo(YarnCLI cli) throws Exception { cli.setSysErrPrintStream(sysErr); cli.run(new String[0]); @@ -832,4 +872,45 @@ public class TestYarnCLI { return cli; } + private String createApplicationCLIHelpMessage() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintWriter pw = new PrintWriter(baos); + pw.println("usage: application"); + pw.println(" -appStates Works with -list to filter applications based"); + pw.println(" on input comma-separated list of application"); + pw.println(" states. The valid application state can be one"); + pw.println(" of the following:"); + pw.println(" ALL,NEW,NEW_SAVING,SUBMITTED,ACCEPTED,RUNNING,"); + pw.println(" FINISHED,FAILED,KILLED"); + pw.println(" -appTypes Works with -list to filter applications based"); + pw.println(" on input comma-separated list of application"); + pw.println(" types."); + pw.println(" -help Displays help for all commands."); + pw.println(" -kill Kills the application."); + pw.println(" -list List applications from the RM. Supports"); + pw.println(" optional use of -appTypes to filter"); + pw.println(" applications based on application type, and"); + pw.println(" -appStates to filter applications based on"); + pw.println(" application state"); + pw.println(" -status Prints the status of the application."); + pw.close(); + String appsHelpStr = baos.toString("UTF-8"); + return appsHelpStr; + } + + private String createNodeCLIHelpMessage() throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + PrintWriter pw = new PrintWriter(baos); + pw.println("usage: node"); + pw.println(" -all Works with -list to list all nodes."); + pw.println(" -list List all running nodes. 
Supports optional use of"); + pw.println(" -states to filter nodes based on node state, all -all"); + pw.println(" to list all nodes."); + pw.println(" -states Works with -list to filter nodes based on input"); + pw.println(" comma-separated list of node states."); + pw.println(" -status Prints the status report of the node."); + pw.close(); + String nodesHelpStr = baos.toString("UTF-8"); + return nodesHelpStr; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java index bb93b9187d8..596ae28d25f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java @@ -198,7 +198,8 @@ public class ConverterUtils { Iterator it = _split(appIdStr).iterator(); if (!it.next().equals(APPLICATION_PREFIX)) { throw new IllegalArgumentException("Invalid ApplicationId prefix: " - + appIdStr); + + appIdStr + ". The valid ApplicationId should start with prefix " + + APPLICATION_PREFIX); } try { return toApplicationId(it); From 236b8530bd05015d3b8a8131b111454c54c9e55d Mon Sep 17 00:00:00 2001 From: Sanford Ryza Date: Sat, 31 Aug 2013 02:09:45 +0000 Subject: [PATCH 105/153] HADOOP-9918. Add addIfService to CompositeService (Karthik Kambatla via Sandy Ryza) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519129 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../hadoop/service/CompositeService.java | 20 ++++++++++++++++ .../hadoop/mapreduce/v2/app/MRAppMaster.java | 6 ----- .../yarn/util/TestCompositeService.java | 23 +++++++++++++++++++ .../ContainerManagerImpl.java | 7 ------ .../resourcemanager/ResourceManager.java | 7 ------ 6 files changed, 46 insertions(+), 20 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 7e12e7f288b..025aaf1dab6 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -399,6 +399,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9906. Move HAZKUtil to o.a.h.util.ZKUtil and make inner-classes public (Karthik Kambatla via Sandy Ryza) + HADOOP-9918. 
Add addIfService to CompositeService (Karthik Kambatla via + Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java index 476f79225d7..383a7a84148 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/service/CompositeService.java @@ -64,6 +64,11 @@ public class CompositeService extends AbstractService { } } + /** + * Add the passed {@link Service} to the list of services managed by this + * {@link CompositeService} + * @param service the {@link Service} to be added + */ protected void addService(Service service) { if (LOG.isDebugEnabled()) { LOG.debug("Adding service " + service.getName()); @@ -73,6 +78,21 @@ public class CompositeService extends AbstractService { } } + /** + * If the passed object is an instance of {@link Service}, + * add it to the list of services managed by this {@link CompositeService} + * @param object + * @return true if a service is added, false otherwise. + */ + protected boolean addIfService(Object object) { + if (object instanceof Service) { + addService((Service) object); + return true; + } else { + return false; + } + } + protected synchronized boolean removeService(Service service) { synchronized (serviceList) { return serviceList.add(service); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index e6df1fcad38..58f4deef3a1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -626,12 +626,6 @@ public class MRAppMaster extends CompositeService { } } - protected void addIfService(Object object) { - if (object instanceof Service) { - addService((Service) object); - } - } - protected EventHandler createJobHistoryHandler( AppContext context) { this.jobHistoryEventHandler = new JobHistoryEventHandler(context, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java index 79189985710..34b77cd92bb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestCompositeService.java @@ -19,11 +19,14 @@ package org.apache.hadoop.yarn.util; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.BreakableService; import org.apache.hadoop.service.CompositeService; import 
org.apache.hadoop.service.Service; @@ -314,6 +317,26 @@ public class TestCompositeService { composite.init(new Configuration()); assertInState(STATE.INITED, child); } + + @Test (timeout = 1000) + public void testAddIfService() { + CompositeService testService = new CompositeService("TestService") { + Service service; + @Override + public void serviceInit(Configuration conf) { + Integer notAService = new Integer(0); + assertFalse("Added an integer as a service", + addIfService(notAService)); + + service = new AbstractService("Service") {}; + assertTrue("Unable to add a service", addIfService(service)); + } + }; + + testService.init(new Configuration()); + assertEquals("Incorrect number of services", + 1, testService.getServices().size()); + } public static class CompositeServiceAddingAChild extends CompositeService{ Service child; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index f8a5ea28f24..4902836c14c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -85,7 +85,6 @@ import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger; import org.apache.hadoop.yarn.server.nodemanager.NMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.nodemanager.NodeManager; import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdater; -import org.apache.hadoop.yarn.server.nodemanager.NodeStatusUpdaterImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationContainerInitEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationEvent; @@ -193,12 +192,6 @@ public class ContainerManagerImpl extends CompositeService implements super.serviceInit(conf); } - private void addIfService(Object object) { - if (object instanceof Service) { - addService((Service) object); - } - } - protected LogHandler createLogHandler(Configuration conf, Context context, DeletionService deletionService) { if (conf.getBoolean(YarnConfiguration.LOG_AGGREGATION_ENABLED, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index 6ca5307bc53..c0b372a753c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -33,7 +33,6 @@ import org.apache.hadoop.security.SecurityUtil; import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.service.CompositeService; -import org.apache.hadoop.service.Service; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ShutdownHookManager; @@ -302,12 +301,6 @@ public class ResourceManager extends CompositeService implements Recoverable { return new AsyncDispatcher(); } - protected void addIfService(Object object) { - if (object instanceof Service) { - addService((Service) object); - } - } - protected AMRMTokenSecretManager createAMRMTokenSecretManager( Configuration conf) { return new AMRMTokenSecretManager(conf); From 76cb07ee2077da61e0c07131bcbe7d0ddc73080e Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sat, 31 Aug 2013 19:55:37 +0000 Subject: [PATCH 106/153] YARN-981. Fixed YARN webapp so that /logs servlet works like before. Addendum patch to fix bugs in the first patch. Contributed by Jian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519208 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/yarn/webapp/WebApps.java | 3 +- .../webapp/MyTestJAXBContextResolver.java | 56 ++++++++++++ .../hadoop/yarn/webapp/MyTestWebService.java | 47 ++++++++++ .../apache/hadoop/yarn/webapp/TestWebApp.java | 91 ++++++++++++++----- 4 files changed, 173 insertions(+), 24 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestJAXBContextResolver.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestWebService.java diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java index 622a9d8c239..a56d2a4beda 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java @@ -242,9 +242,8 @@ public class WebApps { for(Map.Entry entry : attributes.entrySet()) { server.setAttribute(entry.getKey(), entry.getValue()); } - String webAppPath = "/" + name + "/*"; server.defineFilter(server.getWebAppContext(), "guice", - GuiceFilter.class.getName(), null, new String[] { webAppPath, "/" }); + GuiceFilter.class.getName(), null, new String[] { "/*" }); webapp.setConf(conf); webapp.setHttpServer(server); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestJAXBContextResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestJAXBContextResolver.java new file mode 100644 index 00000000000..6f6ee5d4b53 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestJAXBContextResolver.java @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by joblicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.webapp; + +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; + +import javax.ws.rs.ext.ContextResolver; +import javax.ws.rs.ext.Provider; +import javax.xml.bind.JAXBContext; + +import org.apache.hadoop.yarn.webapp.MyTestWebService.MyInfo; + +import com.google.inject.Singleton; +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.api.json.JSONJAXBContext; + +@Singleton +@Provider +public class MyTestJAXBContextResolver implements ContextResolver { + + private JAXBContext context; + private final Set types; + + // you have to specify all the dao classes here + private final Class[] cTypes = { MyInfo.class }; + + public MyTestJAXBContextResolver() throws Exception { + this.types = new HashSet(Arrays.asList(cTypes)); + this.context = + new JSONJAXBContext(JSONConfiguration.natural().rootUnwrapping(false) + .build(), cTypes); + } + + @Override + public JAXBContext getContext(Class objectType) { + return (types.contains(objectType)) ? context : null; + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestWebService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestWebService.java new file mode 100644 index 00000000000..f37b01a1462 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/MyTestWebService.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by joblicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.webapp; + +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.xml.bind.annotation.XmlAccessType; +import javax.xml.bind.annotation.XmlAccessorType; +import javax.xml.bind.annotation.XmlRootElement; + +import com.google.inject.Singleton; + +@Singleton +@Path("/ws/v1/test") +public class MyTestWebService { + @GET + @Produces({ MediaType.APPLICATION_XML }) + public MyInfo get() { + return new MyInfo(); + } + + @XmlRootElement(name = "myInfo") + @XmlAccessorType(XmlAccessType.FIELD) + static class MyInfo { + public MyInfo() { + + } + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java index 59beab67a4b..3d8acf29523 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/webapp/TestWebApp.java @@ -18,30 +18,47 @@ package org.apache.hadoop.yarn.webapp; -import org.apache.commons.lang.ArrayUtils; -import org.apache.hadoop.yarn.MockApps; -import org.apache.hadoop.yarn.webapp.Controller; -import org.apache.hadoop.yarn.webapp.WebApp; -import org.apache.hadoop.yarn.webapp.WebApps; -import org.apache.hadoop.yarn.webapp.view.HtmlPage; -import org.apache.hadoop.yarn.webapp.view.JQueryUI; -import org.apache.hadoop.yarn.webapp.view.TextPage; - -import com.google.inject.Inject; +import static org.apache.hadoop.yarn.util.StringHelper.join; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_TABLE; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.DATATABLES_ID; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._INFO_WRAP; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI._TH; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.initID; +import static org.apache.hadoop.yarn.webapp.view.JQueryUI.tableInit; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; import java.io.InputStream; import java.net.HttpURLConnection; import java.net.URL; +import java.util.Arrays; +import java.util.HashSet; +import java.util.Set; -import static org.apache.hadoop.yarn.util.StringHelper.*; -import static org.apache.hadoop.yarn.webapp.view.JQueryUI.*; +import javax.ws.rs.GET; +import javax.ws.rs.Path; +import javax.ws.rs.Produces; +import javax.ws.rs.core.MediaType; +import javax.ws.rs.ext.ContextResolver; +import javax.ws.rs.ext.Provider; +import javax.xml.bind.JAXBContext; +import org.apache.commons.lang.ArrayUtils; +import org.apache.hadoop.yarn.MockApps; +import org.apache.hadoop.yarn.webapp.view.HtmlPage; +import org.apache.hadoop.yarn.webapp.view.JQueryUI; +import org.apache.hadoop.yarn.webapp.view.TextPage; import org.junit.Test; -import static org.junit.Assert.*; - import org.slf4j.Logger; import org.slf4j.LoggerFactory; +import com.google.inject.Inject; +import com.google.inject.Singleton; +import com.sun.jersey.api.json.JSONConfiguration; +import com.sun.jersey.api.json.JSONJAXBContext; + public class TestWebApp { static final Logger LOG = LoggerFactory.getLogger(TestWebApp.class); @@ -227,14 +244,19 @@ public class TestWebApp { } @Test 
public void testCustomRoutes() throws Exception { - WebApp app = WebApps.$for("test", this).start(new WebApp() { - @Override public void setup() { - route("/:foo", FooController.class); - route("/bar/foo", FooController.class, "bar"); - route("/foo/:foo", DefaultController.class); - route("/foo/bar/:foo", DefaultController.class, "index"); - } - }); + WebApp app = + WebApps.$for("test", TestWebApp.class, this, "ws").start(new WebApp() { + @Override + public void setup() { + bind(MyTestJAXBContextResolver.class); + bind(MyTestWebService.class); + + route("/:foo", FooController.class); + route("/bar/foo", FooController.class, "bar"); + route("/foo/:foo", DefaultController.class); + route("/foo/bar/:foo", DefaultController.class, "index"); + } + }); String baseUrl = baseUrl(app); try { assertEquals("foo", getContent(baseUrl).trim()); @@ -245,6 +267,31 @@ public class TestWebApp { assertEquals("default1", getContent(baseUrl +"test/foo/1").trim()); assertEquals("default2", getContent(baseUrl +"test/foo/bar/2").trim()); assertEquals(404, getResponseCode(baseUrl +"test/goo")); + assertEquals(200, getResponseCode(baseUrl +"ws/v1/test")); + assertTrue(getContent(baseUrl +"ws/v1/test").contains("myInfo")); + } finally { + app.stop(); + } + } + + // This is to test the GuiceFilter should only be applied to webAppContext, + // not to staticContext and logContext; + @Test public void testYARNWebAppContext() throws Exception { + // setting up the log context + System.setProperty("hadoop.log.dir", "/Not/Existing/dir"); + WebApp app = WebApps.$for("test", this).start(new WebApp() { + @Override public void setup() { + route("/", FooController.class); + } + }); + String baseUrl = baseUrl(app); + try { + // should not redirect to foo + assertFalse("foo".equals(getContent(baseUrl +"static").trim())); + // Not able to access a non-existing dir, should not redirect to foo. + assertEquals(404, getResponseCode(baseUrl +"logs")); + // should be able to redirect to foo. + assertEquals("foo", getContent(baseUrl).trim()); } finally { app.stop(); } From cbca1668317f3f2d295eea53d7bd020bda4a810f Mon Sep 17 00:00:00 2001 From: Brandon Li Date: Sat, 31 Aug 2013 21:12:22 +0000 Subject: [PATCH 107/153] HDFS-5136 MNT EXPORT should give the full group list which can mount the exports. 
Contributed by Brandon Li git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519222 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/mount/MountResponse.java | 24 +++++++++--- .../apache/hadoop/nfs/nfs3/Nfs3Constant.java | 6 +-- .../hadoop}/nfs/security/AccessPrivilege.java | 2 +- .../hadoop}/nfs/security/NfsExports.java | 38 ++++++++++++++++++- .../hadoop}/nfs/security/TestNfsExports.java | 4 +- .../hdfs/nfs/mount/RpcProgramMountd.java | 9 +++-- .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 6 +-- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ 8 files changed, 72 insertions(+), 20 deletions(-) rename {hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop}/nfs/security/AccessPrivilege.java (95%) rename {hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop}/nfs/security/NfsExports.java (93%) rename {hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs => hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop}/nfs/security/TestNfsExports.java (97%) diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java index 03cb1ae52bd..3839acc1966 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java @@ -19,6 +19,7 @@ package org.apache.hadoop.mount; import java.util.List; +import org.apache.hadoop.nfs.security.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.XDR; import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; @@ -59,15 +60,28 @@ public class MountResponse { xdr.writeBoolean(false); // Value follows no return xdr; } - + /** Response for RPC call {@link MountInterface.MNTPROC#EXPORT} */ - public static XDR writeExportList(XDR xdr, int xid, List exports) { + public static XDR writeExportList(XDR xdr, int xid, List exports, + List hostMatcher) { + assert (exports.size() == hostMatcher.size()); + RpcAcceptedReply.voidReply(xdr, xid); - for (String export : exports) { + for (int i = 0; i < exports.size(); i++) { xdr.writeBoolean(true); // Value follows - yes - xdr.writeString(export); - xdr.writeInt(0); + xdr.writeString(exports.get(i)); + + // List host groups + String[] hostGroups = hostMatcher.get(i).getHostGroupList(); + if (hostGroups.length > 0) { + for (int j = 0; j < hostGroups.length; j++) { + xdr.writeBoolean(true); // Value follows - yes + xdr.writeVariableOpaque(hostGroups[j].getBytes()); + } + } + xdr.writeBoolean(false); // Value follows - no more group } + xdr.writeBoolean(false); // Value follows - no return xdr; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java index 8e9a8f10764..706c99f47c4 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java @@ -192,13 +192,13 @@ public class Nfs3Constant { public static final String EXPORTS_ALLOWED_HOSTS_SEPARATOR = ";"; /** Allowed hosts for nfs exports */ - public static final String 
EXPORTS_ALLOWED_HOSTS_KEY = "hdfs.nfs.exports.allowed.hosts"; + public static final String EXPORTS_ALLOWED_HOSTS_KEY = "dfs.nfs.exports.allowed.hosts"; public static final String EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT = "* rw"; /** Size for nfs exports cache */ - public static final String EXPORTS_CACHE_SIZE_KEY = "hdfs.nfs.exports.cache.size"; + public static final String EXPORTS_CACHE_SIZE_KEY = "dfs.nfs.exports.cache.size"; public static final int EXPORTS_CACHE_SIZE_DEFAULT = 512; /** Expiration time for nfs exports cache entry */ - public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "hdfs.nfs.exports.cache.expirytime.millis"; + public static final String EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY = "dfs.nfs.exports.cache.expirytime.millis"; public static final long EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 15 * 60 * 1000; // 15 min public static final String FILE_DUMP_DIR_KEY = "dfs.nfs3.dump.dir"; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java similarity index 95% rename from hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java index 43a0d001f26..8789ecfb4e3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/AccessPrivilege.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.hdfs.nfs.security; +package org.apache.hadoop.nfs.security; public enum AccessPrivilege { READ_ONLY, diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java similarity index 93% rename from hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java index ad194e9e2d3..301f2f0ff72 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/security/NfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.hdfs.nfs.security; +package org.apache.hadoop.nfs.security; import java.net.InetAddress; import java.util.ArrayList; @@ -153,6 +153,19 @@ public class NfsExports { } } + /** + * Return the configured group list + */ + public String[] getHostGroupList() { + int listSize = mMatches.size(); + String[] hostGroups = new String[listSize]; + + for (int i = 0; i < mMatches.size(); i++) { + hostGroups[i] = mMatches.get(i).getHostGroup(); + } + return hostGroups; + } + public AccessPrivilege getAccessPrivilege(InetAddress addr) { return getAccessPrivilege(addr.getHostAddress(), addr.getCanonicalHostName()); @@ -191,6 +204,7 @@ public class NfsExports { } public abstract boolean isIncluded(String address, String hostname); + public abstract String getHostGroup(); } /** @@ -202,9 +216,14 @@ public class NfsExports { } @Override - public boolean isIncluded(String ip, String hostname) { + public boolean isIncluded(String address, String hostname) { return true; } + + @Override + public String getHostGroup() { + return "*"; + } } /** @@ -235,6 +254,11 @@ public class NfsExports { } return false; } + + @Override + public String getHostGroup() { + return subnetInfo.getAddress() + "/" + subnetInfo.getNetmask(); + } } /** @@ -264,6 +288,11 @@ public class NfsExports { } return false; } + + @Override + public String getHostGroup() { + return ipOrHost; + } } /** @@ -293,6 +322,11 @@ public class NfsExports { } return false; } + + @Override + public String getHostGroup() { + return pattern.toString(); + } } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java similarity index 97% rename from hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java rename to hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java index 9448e18632e..dbadd8ba339 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/security/TestNfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java @@ -15,12 +15,10 @@ * See the License for the specific language governing permissions and * limitations under the License. 
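The Nfs3Constant keys above move the export settings to the dfs.nfs.* prefix, and each NfsExports matcher now reports its host group (anonymous "*", CIDR subnet, exact host, or regex pattern) for the MNT EXPORT reply. A hedged configuration sketch follows; the entry format (host specs separated by ";", each with an optional ro/rw privilege) is inferred from the separator constant and the "* rw" default, and the hosts shown are invented for illustration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

public class NfsExportsConfSketch {
  // Builds a Configuration that would allow one subnet read-write access and a
  // single host read-only access to the NFS export. Host values are examples only.
  public static Configuration exportConf() {
    Configuration conf = new Configuration();
    conf.set(Nfs3Constant.EXPORTS_ALLOWED_HOSTS_KEY,
        "192.168.0.0/22 rw ; host1.example.com ro");
    // Cache tuning for resolved access decisions; values mirror the defaults
    // shown in the patch (512 entries, 15 minutes).
    conf.setInt(Nfs3Constant.EXPORTS_CACHE_SIZE_KEY, 512);
    conf.setLong(Nfs3Constant.EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY, 15 * 60 * 1000L);
    return conf;
  }
}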
*/ -package org.apache.hadoop.hdfs.nfs.security; +package org.apache.hadoop.nfs.security; import junit.framework.Assert; -import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege; -import org.apache.hadoop.hdfs.nfs.security.NfsExports; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.junit.Test; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java index cec235c7d4a..5b5ea511d19 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java @@ -27,8 +27,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSClient; -import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege; -import org.apache.hadoop.hdfs.nfs.security.NfsExports; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.mount.MountEntry; @@ -36,6 +34,8 @@ import org.apache.hadoop.mount.MountInterface; import org.apache.hadoop.mount.MountResponse; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Status; +import org.apache.hadoop.nfs.security.AccessPrivilege; +import org.apache.hadoop.nfs.security.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcProgram; @@ -184,7 +184,10 @@ public class RpcProgramMountd extends RpcProgram implements MountInterface { } else if (mntproc == MNTPROC.UMNTALL) { umntall(out, xid, client); } else if (mntproc == MNTPROC.EXPORT) { - out = MountResponse.writeExportList(out, xid, exports); + // Currently only support one NFS export "/" + List hostsMatchers = new ArrayList(); + hostsMatchers.add(hostsMatcher); + out = MountResponse.writeExportList(out, xid, exports, hostsMatchers); } else { // Invalid procedure RpcAcceptedReply.voidReply(out, xid, diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 1f39ace973f..404cf3e73ca 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -26,10 +26,10 @@ import java.util.EnumSet; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.CreateFlag; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem.Statistics; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.FsStatus; import org.apache.hadoop.fs.Options; @@ -38,8 +38,6 @@ import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.DFSInputStream; import org.apache.hadoop.hdfs.client.HdfsDataOutputStream; -import org.apache.hadoop.hdfs.nfs.security.AccessPrivilege; -import 
org.apache.hadoop.hdfs.nfs.security.NfsExports; import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; @@ -98,6 +96,8 @@ import org.apache.hadoop.nfs.nfs3.response.VoidResponse; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccData; +import org.apache.hadoop.nfs.security.AccessPrivilege; +import org.apache.hadoop.nfs.security.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.oncrpc.RpcAuthSys; diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 068f7003175..4d7116f4faa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -313,6 +313,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5078 Support file append in NFSv3 gateway to enable data streaming to HDFS (brandonli) + HDFS-5136 MNT EXPORT should give the full group list which can mount the + exports (brandonli) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From a759abcd898224c3481c55aa7e424bc286f60b15 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 2 Sep 2013 00:09:17 +0000 Subject: [PATCH 108/153] YARN-649. Added a new NM web-service to serve container logs in plain text over HTTP. Contributed by Sandy Ryza. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519326 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../yarn/server/nodemanager/Context.java | 5 + .../yarn/server/nodemanager/NodeManager.java | 29 +- .../ContainerManagerImpl.java | 3 +- .../application/ApplicationImpl.java | 5 +- .../nodemanager/webapp/ContainerLogsPage.java | 343 ++++++------------ .../webapp/ContainerLogsUtils.java | 190 ++++++++++ .../nodemanager/webapp/NMWebServices.java | 78 +++- .../server/nodemanager/TestEventFlow.java | 2 +- .../nodemanager/TestNodeStatusUpdater.java | 2 +- .../BaseContainerManagerTest.java | 2 +- .../application/TestApplication.java | 5 +- .../webapp/TestContainerLogsPage.java | 10 +- .../nodemanager/webapp/TestNMWebServer.java | 11 +- .../nodemanager/webapp/TestNMWebServices.java | 77 +++- .../webapp/TestNMWebServicesApps.java | 14 +- .../webapp/TestNMWebServicesContainers.java | 18 +- 17 files changed, 508 insertions(+), 289 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsUtils.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 05206aedbe0..393a4f59145 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -21,6 +21,9 @@ Release 2.3.0 - UNRELEASED NEW FEATURES + YARN-649. Added a new NM web-service to serve container logs in plain text + over HTTP. (Sandy Ryza via vinodkv) + IMPROVEMENTS YARN-905. 
Add state filters to nodes CLI (Wei Yan via Sandy Ryza) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java index f66be98a58f..729e0433d08 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/Context.java @@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Ap import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM; +import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; /** * Context interface for sharing information across components in the @@ -61,4 +62,8 @@ public interface Context { NodeHealthStatus getNodeHealthStatus(); ContainerManagementProtocol getContainerManager(); + + LocalDirsHandlerService getLocalDirsHandler(); + + ApplicationACLsManager getApplicationACLsManager(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java index 5b178df2f75..e287adde1d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java @@ -123,7 +123,8 @@ public class NodeManager extends CompositeService protected NMContext createNMContext( NMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInNM nmTokenSecretManager) { - return new NMContext(containerTokenSecretManager, nmTokenSecretManager); + return new NMContext(containerTokenSecretManager, nmTokenSecretManager, + dirsHandler, aclsManager); } protected void doSecureLogin() throws IOException { @@ -142,9 +143,6 @@ public class NodeManager extends CompositeService NMTokenSecretManagerInNM nmTokenSecretManager = new NMTokenSecretManagerInNM(); - this.context = - createNMContext(containerTokenSecretManager, nmTokenSecretManager); - this.aclsManager = new ApplicationACLsManager(conf); ContainerExecutor exec = ReflectionUtils.newInstance( @@ -165,7 +163,9 @@ public class NodeManager extends CompositeService addService(nodeHealthChecker); dirsHandler = nodeHealthChecker.getDiskHandler(); - + this.context = createNMContext(containerTokenSecretManager, + nmTokenSecretManager); + nodeStatusUpdater = createNodeStatusUpdater(context, dispatcher, nodeHealthChecker); @@ -319,14 +319,19 @@ public class NodeManager extends CompositeService private final NMContainerTokenSecretManager containerTokenSecretManager; private final NMTokenSecretManagerInNM nmTokenSecretManager; private ContainerManagementProtocol containerManager; + private final 
LocalDirsHandlerService dirsHandler; + private final ApplicationACLsManager aclsManager; private WebServer webServer; private final NodeHealthStatus nodeHealthStatus = RecordFactoryProvider .getRecordFactory(null).newRecordInstance(NodeHealthStatus.class); - + public NMContext(NMContainerTokenSecretManager containerTokenSecretManager, - NMTokenSecretManagerInNM nmTokenSecretManager) { + NMTokenSecretManagerInNM nmTokenSecretManager, + LocalDirsHandlerService dirsHandler, ApplicationACLsManager aclsManager) { this.containerTokenSecretManager = containerTokenSecretManager; this.nmTokenSecretManager = nmTokenSecretManager; + this.dirsHandler = dirsHandler; + this.aclsManager = aclsManager; this.nodeHealthStatus.setIsNodeHealthy(true); this.nodeHealthStatus.setHealthReport("Healthy"); this.nodeHealthStatus.setLastHealthReportTime(System.currentTimeMillis()); @@ -386,6 +391,16 @@ public class NodeManager extends CompositeService public void setNodeId(NodeId nodeId) { this.nodeId = nodeId; } + + @Override + public LocalDirsHandlerService getLocalDirsHandler() { + return dirsHandler; + } + + @Override + public ApplicationACLsManager getApplicationACLsManager() { + return aclsManager; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index 4902836c14c..e2a949c1e38 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -468,8 +468,7 @@ public class ContainerManagerImpl extends CompositeService implements // Create the application Application application = - new ApplicationImpl(dispatcher, this.aclsManager, user, applicationID, - credentials, context); + new ApplicationImpl(dispatcher, user, applicationID, credentials, context); if (null == context.getApplications().putIfAbsent(applicationID, application)) { LOG.info("Creating a new application reference for app " + applicationID); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java index 104896568bf..edf6359a7b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationImpl.java @@ -73,14 +73,13 @@ public class ApplicationImpl implements Application { Map containers = new HashMap(); - public ApplicationImpl(Dispatcher dispatcher, - ApplicationACLsManager aclsManager, String user, ApplicationId appId, + public ApplicationImpl(Dispatcher 
dispatcher, String user, ApplicationId appId, Credentials credentials, Context context) { this.dispatcher = dispatcher; this.user = user; this.appId = appId; this.credentials = credentials; - this.aclsManager = aclsManager; + this.aclsManager = context.getApplicationACLsManager(); this.context = context; ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); readLock = lock.readLock(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java index 452a8237cb8..7d2948ea834 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java @@ -28,36 +28,21 @@ import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.io.InputStreamReader; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; -import java.util.EnumSet; import java.util.List; -import org.apache.commons.io.IOUtils; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.SecureIOUtils; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.yarn.api.records.ApplicationAccessType; -import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.server.nodemanager.Context; -import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; -import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.webapp.NotFoundException; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet; import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.PRE; import org.apache.hadoop.yarn.webapp.view.HtmlBlock; -import org.mortbay.log.Log; import com.google.inject.Inject; @@ -90,19 +75,11 @@ public class ContainerLogsPage extends NMView { public static class ContainersLogsBlock extends HtmlBlock implements YarnWebParams { - private final Configuration conf; private final Context nmContext; - private final ApplicationACLsManager aclsManager; - private final LocalDirsHandlerService dirsHandler; @Inject - public ContainersLogsBlock(Configuration conf, Context context, - ApplicationACLsManager aclsManager, - LocalDirsHandlerService dirsHandler) { - this.conf = conf; + public ContainersLogsBlock(Context context) { this.nmContext = context; - this.aclsManager = 
aclsManager; - this.dirsHandler = dirsHandler; } @Override @@ -114,229 +91,123 @@ public class ContainerLogsPage extends NMView { " server. Log Server url may not be configured"); //Intentional fallthrough. } - + ContainerId containerId; try { containerId = ConverterUtils.toContainerId($(CONTAINER_ID)); - } catch (IllegalArgumentException e) { - html.h1("Invalid containerId " + $(CONTAINER_ID)); + } catch (IllegalArgumentException ex) { + html.h1("Invalid container ID: " + $(CONTAINER_ID)); return; } - ApplicationId applicationId = containerId.getApplicationAttemptId() - .getApplicationId(); - Application application = this.nmContext.getApplications().get( - applicationId); - Container container = this.nmContext.getContainers().get(containerId); - - if (application == null) { - html.h1( - "Unknown container. Container either has not started or " - + "has already completed or " - + "doesn't belong to this node at all."); - return; - } - if (container == null) { - // Container may have alerady completed, but logs not aggregated yet. - printLogs(html, containerId, applicationId, application); - return; - } - - if (EnumSet.of(ContainerState.NEW, ContainerState.LOCALIZING, - ContainerState.LOCALIZED).contains(container.getContainerState())) { - html.h1("Container is not yet running. Current state is " - + container.getContainerState()); - return; - } - - if (container.getContainerState() == ContainerState.LOCALIZATION_FAILED) { - html.h1("Container wasn't started. Localization failed."); - return; - } - - if (EnumSet.of(ContainerState.RUNNING, - ContainerState.EXITED_WITH_FAILURE, - ContainerState.EXITED_WITH_SUCCESS).contains( - container.getContainerState())) { - printLogs(html, containerId, applicationId, application); - return; - } - if (EnumSet.of(ContainerState.KILLING, - ContainerState.CONTAINER_CLEANEDUP_AFTER_KILL, - ContainerState.CONTAINER_RESOURCES_CLEANINGUP).contains( - container.getContainerState())) { - //Container may have generated some logs before being killed. - printLogs(html, containerId, applicationId, application); - return; - } - if (container.getContainerState().equals(ContainerState.DONE)) { - // Prev state unknown. Logs may be available. - printLogs(html, containerId, applicationId, application); - return; - } else { - html.h1("Container is no longer running..."); - return; - } - } - - private void printLogs(Block html, ContainerId containerId, - ApplicationId applicationId, Application application) { - // Check for the authorization. - String remoteUser = request().getRemoteUser(); - UserGroupInformation callerUGI = null; - - if (remoteUser != null) { - callerUGI = UserGroupInformation.createRemoteUser(remoteUser); - } - if (callerUGI != null - && !this.aclsManager.checkAccess(callerUGI, - ApplicationAccessType.VIEW_APP, application.getUser(), - applicationId)) { - html.h1( - "User [" + remoteUser - + "] is not authorized to view the logs for application " - + applicationId); - return; - } - - if (!$(CONTAINER_LOG_TYPE).isEmpty()) { - File logFile = null; - try { - URI logPathURI = new URI(this.dirsHandler.getLogPathToRead( - ContainerLaunch.getRelativeContainerLogDir( - applicationId.toString(), containerId.toString()) - + Path.SEPARATOR + $(CONTAINER_LOG_TYPE)).toString()); - logFile = new File(logPathURI.getPath()); - } catch (URISyntaxException e) { - html.h1("Cannot find this log on the local disk."); - return; - } catch (Exception e) { - html.h1("Cannot find this log on the local disk."); - return; - } - long start = - $("start").isEmpty() ? 
-4 * 1024 : Long.parseLong($("start")); - start = start < 0 ? logFile.length() + start : start; - start = start < 0 ? 0 : start; - long end = - $("end").isEmpty() ? logFile.length() : Long.parseLong($("end")); - end = end < 0 ? logFile.length() + end : end; - end = end < 0 ? logFile.length() : end; - if (start > end) { - html.h1("Invalid start and end values. Start: [" + start + "]" - + ", end[" + end + "]"); - return; + try { + if ($(CONTAINER_LOG_TYPE).isEmpty()) { + List logFiles = ContainerLogsUtils.getContainerLogDirs(containerId, + request().getRemoteUser(), nmContext); + printLogFileDirectory(html, logFiles); } else { - FileInputStream logByteStream = null; - - try { - logByteStream = - SecureIOUtils.openForRead(logFile, application.getUser(), null); - } catch (IOException e) { - LOG.error( - "Exception reading log file " + logFile.getAbsolutePath(), e); - if (e.getMessage().contains( - "did not match expected owner '" + application.getUser() - + "'")) { - html.h1("Exception reading log file. Application submitted by '" - + application.getUser() - + "' doesn't own requested log file : " - + logFile.getName()); - } else { - html.h1("Exception reading log file. It might be because log " - + "file was aggregated : " + logFile.getName()); - } - return; - } - - try { - long toRead = end - start; - if (toRead < logFile.length()) { - html.p()._("Showing " + toRead + " bytes. Click ") - .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), - logFile.getName(), "?start=0"), "here"). - _(" for full log")._(); - } - // TODO Fix findBugs close warning along with IOUtils change - IOUtils.skipFully(logByteStream, start); - InputStreamReader reader = new InputStreamReader(logByteStream); - int bufferSize = 65536; - char[] cbuf = new char[bufferSize]; - - int len = 0; - int currentToRead = toRead > bufferSize ? bufferSize : (int) toRead; - PRE pre = html.pre(); - - while ((len = reader.read(cbuf, 0, currentToRead)) > 0 - && toRead > 0) { - pre._(new String(cbuf, 0, len)); - toRead = toRead - len; - currentToRead = toRead > bufferSize ? bufferSize : (int) toRead; - } - - pre._(); - reader.close(); - - } catch (IOException e) { - LOG.error( - "Exception reading log file " + logFile.getAbsolutePath(), e); - html.h1("Exception reading log file. It might be because log " - + "file was aggregated : " + logFile.getName()); - } finally { - if (logByteStream != null) { - try { - logByteStream.close(); - } catch (IOException e) { - // Ignore - } - } - } + File logFile = ContainerLogsUtils.getContainerLogFile(containerId, + $(CONTAINER_LOG_TYPE), request().getRemoteUser(), nmContext); + printLogFile(html, logFile); } + } catch (YarnException ex) { + html.h1(ex.getMessage()); + } catch (NotFoundException ex) { + html.h1(ex.getMessage()); + } + } + + private void printLogFile(Block html, File logFile) { + long start = + $("start").isEmpty() ? -4 * 1024 : Long.parseLong($("start")); + start = start < 0 ? logFile.length() + start : start; + start = start < 0 ? 0 : start; + long end = + $("end").isEmpty() ? logFile.length() : Long.parseLong($("end")); + end = end < 0 ? logFile.length() + end : end; + end = end < 0 ? logFile.length() : end; + if (start > end) { + html.h1("Invalid start and end values. 
Start: [" + start + "]" + + ", end[" + end + "]"); + return; } else { - // Print out log types in lexical order - List containerLogsDirs = getContainerLogDirs(containerId, - dirsHandler); - Collections.sort(containerLogsDirs); - boolean foundLogFile = false; - for (File containerLogsDir : containerLogsDirs) { - File[] logFiles = containerLogsDir.listFiles(); - if (logFiles != null) { - Arrays.sort(logFiles); - for (File logFile : logFiles) { - foundLogFile = true; - html.p() - .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), - logFile.getName(), "?start=-4096"), - logFile.getName() + " : Total file length is " - + logFile.length() + " bytes.")._(); - } - } - } - if (!foundLogFile) { - html.h1("No logs available for container " + containerId.toString()); + FileInputStream logByteStream = null; + + try { + logByteStream = ContainerLogsUtils.openLogFileForRead($(CONTAINER_ID), + logFile, nmContext); + } catch (IOException ex) { + html.h1(ex.getMessage()); return; } - } - return; - } - - static List getContainerLogDirs(ContainerId containerId, - LocalDirsHandlerService dirsHandler) { - List logDirs = dirsHandler.getLogDirs(); - List containerLogDirs = new ArrayList(logDirs.size()); - for (String logDir : logDirs) { + try { - logDir = new URI(logDir).getPath(); - } catch (URISyntaxException e) { - Log.warn(e.getMessage()); + long toRead = end - start; + if (toRead < logFile.length()) { + html.p()._("Showing " + toRead + " bytes. Click ") + .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), + logFile.getName(), "?start=0"), "here"). + _(" for full log")._(); + } + + IOUtils.skipFully(logByteStream, start); + InputStreamReader reader = new InputStreamReader(logByteStream); + int bufferSize = 65536; + char[] cbuf = new char[bufferSize]; + + int len = 0; + int currentToRead = toRead > bufferSize ? bufferSize : (int) toRead; + PRE pre = html.pre(); + + while ((len = reader.read(cbuf, 0, currentToRead)) > 0 + && toRead > 0) { + pre._(new String(cbuf, 0, len)); + toRead = toRead - len; + currentToRead = toRead > bufferSize ? bufferSize : (int) toRead; + } + + pre._(); + reader.close(); + + } catch (IOException e) { + LOG.error( + "Exception reading log file " + logFile.getAbsolutePath(), e); + html.h1("Exception reading log file. 
It might be because log " + + "file was aggregated : " + logFile.getName()); + } finally { + if (logByteStream != null) { + try { + logByteStream.close(); + } catch (IOException e) { + // Ignore + } + } } - String appIdStr = ConverterUtils.toString(containerId - .getApplicationAttemptId().getApplicationId()); - File appLogDir = new File(logDir, appIdStr); - String containerIdStr = ConverterUtils.toString(containerId); - containerLogDirs.add(new File(appLogDir, containerIdStr)); } - return containerLogDirs; + } + + private void printLogFileDirectory(Block html, List containerLogsDirs) { + // Print out log types in lexical order + Collections.sort(containerLogsDirs); + boolean foundLogFile = false; + for (File containerLogsDir : containerLogsDirs) { + File[] logFiles = containerLogsDir.listFiles(); + if (logFiles != null) { + Arrays.sort(logFiles); + for (File logFile : logFiles) { + foundLogFile = true; + html.p() + .a(url("containerlogs", $(CONTAINER_ID), $(APP_OWNER), + logFile.getName(), "?start=-4096"), + logFile.getName() + " : Total file length is " + + logFile.length() + " bytes.")._(); + } + } + } + if (!foundLogFile) { + html.h1("No logs available for container " + $(CONTAINER_ID)); + return; + } } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsUtils.java new file mode 100644 index 00000000000..4754daf0ada --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsUtils.java @@ -0,0 +1,190 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/
+package org.apache.hadoop.yarn.server.nodemanager.webapp;
+
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.SecureIOUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.exceptions.YarnException;
+import org.apache.hadoop.yarn.server.nodemanager.Context;
+import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.webapp.NotFoundException;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Contains utilities for fetching a user's log file in a secure fashion.
+ */
+public class ContainerLogsUtils {
+  public static final Logger LOG = LoggerFactory.getLogger(ContainerLogsUtils.class);
+
+  /**
+   * Finds the local directories that logs for the given container are stored
+   * on.
+   */
+  public static List<File> getContainerLogDirs(ContainerId containerId,
+      String remoteUser, Context context) throws YarnException {
+    Container container = context.getContainers().get(containerId);
+    if (container == null) {
+      throw new YarnException("Container does not exist.");
+    }
+
+    Application application = getApplicationForContainer(containerId, context);
+    checkAccess(remoteUser, application, context);
+    checkState(container.getContainerState());
+
+    return getContainerLogDirs(containerId, context.getLocalDirsHandler());
+  }
+
+  static List<File> getContainerLogDirs(ContainerId containerId,
+      LocalDirsHandlerService dirsHandler) throws YarnException {
+    List<String> logDirs = dirsHandler.getLogDirs();
+    List<File> containerLogDirs = new ArrayList<File>(logDirs.size());
+    for (String logDir : logDirs) {
+      try {
+        logDir = new URI(logDir).getPath();
+      } catch (URISyntaxException e) {
+        throw new YarnException("Internal error", e);
+      }
+      String appIdStr = ConverterUtils.toString(containerId
+          .getApplicationAttemptId().getApplicationId());
+      File appLogDir = new File(logDir, appIdStr);
+      containerLogDirs.add(new File(appLogDir, containerId.toString()));
+    }
+    return containerLogDirs;
+  }
+
+  /**
+   * Finds the log file with the given filename for the given container.
+ */ + public static File getContainerLogFile(ContainerId containerId, + String fileName, String remoteUser, Context context) throws YarnException { + Container container = context.getContainers().get(containerId); + if (container == null) { + throw new NotFoundException("Container with id " + containerId + + " not found."); + } + + Application application = getApplicationForContainer(containerId, context); + checkAccess(remoteUser, application, context); + checkState(container.getContainerState()); + + try { + LocalDirsHandlerService dirsHandler = context.getLocalDirsHandler(); + String relativeContainerLogDir = ContainerLaunch.getRelativeContainerLogDir( + application.getAppId().toString(), containerId.toString()); + Path logPath = dirsHandler.getLogPathToRead( + relativeContainerLogDir + Path.SEPARATOR + fileName); + URI logPathURI = new URI(logPath.toString()); + File logFile = new File(logPathURI.getPath()); + return logFile; + } catch (URISyntaxException e) { + throw new YarnException("Internal error", e); + } catch (IOException e) { + LOG.warn("Failed to find log file", e); + throw new NotFoundException("Cannot find this log on the local disk."); + } + } + + private static Application getApplicationForContainer(ContainerId containerId, + Context context) { + ApplicationId applicationId = containerId.getApplicationAttemptId() + .getApplicationId(); + Application application = context.getApplications().get( + applicationId); + + if (application == null) { + throw new NotFoundException( + "Unknown container. Container either has not started or " + + "has already completed or " + + "doesn't belong to this node at all."); + } + return application; + } + + private static void checkAccess(String remoteUser, Application application, + Context context) throws YarnException { + UserGroupInformation callerUGI = null; + if (remoteUser != null) { + callerUGI = UserGroupInformation.createRemoteUser(remoteUser); + } + if (callerUGI != null + && !context.getApplicationACLsManager().checkAccess(callerUGI, + ApplicationAccessType.VIEW_APP, application.getUser(), + application.getAppId())) { + throw new YarnException( + "User [" + remoteUser + + "] is not authorized to view the logs for application " + + application.getAppId()); + } + } + + private static void checkState(ContainerState state) { + if (state == ContainerState.NEW || state == ContainerState.LOCALIZING || + state == ContainerState.LOCALIZED) { + throw new NotFoundException("Container is not yet running. Current state is " + + state); + } + if (state == ContainerState.LOCALIZATION_FAILED) { + throw new NotFoundException("Container wasn't started. Localization failed."); + } + } + + public static FileInputStream openLogFileForRead(String containerIdStr, File logFile, + Context context) throws IOException { + ContainerId containerId = ConverterUtils.toContainerId(containerIdStr); + ApplicationId applicationId = containerId.getApplicationAttemptId() + .getApplicationId(); + String user = context.getApplications().get( + applicationId).getUser(); + + try { + return SecureIOUtils.openForRead(logFile, user, null); + } catch (IOException e) { + if (e.getMessage().contains( + "did not match expected owner '" + user + + "'")) { + LOG.error( + "Exception reading log file " + logFile.getAbsolutePath(), e); + throw new IOException("Exception reading log file. Application submitted by '" + + user + + "' doesn't own requested log file : " + + logFile.getName(), e); + } else { + throw new IOException("Exception reading log file. 
It might be because log " + + "file was aggregated : " + logFile.getName(), e); + } + } + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java index 168f18a5526..16f2c685c1e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/NMWebServices.java @@ -17,19 +17,31 @@ package org.apache.hadoop.yarn.server.nodemanager.webapp; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.OutputStream; import java.util.Map.Entry; +import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletResponse; import javax.ws.rs.GET; import javax.ws.rs.Path; import javax.ws.rs.PathParam; import javax.ws.rs.Produces; import javax.ws.rs.QueryParam; +import javax.ws.rs.WebApplicationException; import javax.ws.rs.core.MediaType; +import javax.ws.rs.core.Response; +import javax.ws.rs.core.Response.Status; +import javax.ws.rs.core.StreamingOutput; import javax.ws.rs.core.UriInfo; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.Context; @@ -59,6 +71,9 @@ public class NMWebServices { private static RecordFactory recordFactory = RecordFactoryProvider .getRecordFactory(null); + private @javax.ws.rs.core.Context + HttpServletRequest request; + private @javax.ws.rs.core.Context HttpServletResponse response; @@ -179,5 +194,66 @@ public class NMWebServices { .toString(), webapp.name()); } - + + /** + * Returns the contents of a container's log file in plain text. + * + * Only works for containers that are still in the NodeManager's memory, so + * logs are no longer available after the corresponding application is no + * longer running. 
+ * + * @param containerIdStr + * The container ID + * @param filename + * The name of the log file + * @return + * The contents of the container's log file + */ + @GET + @Path("/containerlogs/{containerid}/{filename}") + @Produces({ MediaType.TEXT_PLAIN }) + @Public + @Unstable + public Response getLogs(@PathParam("containerid") String containerIdStr, + @PathParam("filename") String filename) { + ContainerId containerId; + try { + containerId = ConverterUtils.toContainerId(containerIdStr); + } catch (IllegalArgumentException ex) { + return Response.status(Status.BAD_REQUEST).build(); + } + + File logFile = null; + try { + logFile = ContainerLogsUtils.getContainerLogFile( + containerId, filename, request.getRemoteUser(), nmContext); + } catch (NotFoundException ex) { + return Response.status(Status.NOT_FOUND).entity(ex.getMessage()).build(); + } catch (YarnException ex) { + return Response.serverError().entity(ex.getMessage()).build(); + } + + try { + final FileInputStream fis = ContainerLogsUtils.openLogFileForRead( + containerIdStr, logFile, nmContext); + + StreamingOutput stream = new StreamingOutput() { + @Override + public void write(OutputStream os) throws IOException, + WebApplicationException { + int bufferSize = 65536; + byte[] buf = new byte[bufferSize]; + int len; + while ((len = fis.read(buf, 0, bufferSize)) > 0) { + os.write(buf, 0, len); + } + os.flush(); + } + }; + + return Response.ok(stream).build(); + } catch (IOException ex) { + return Response.serverError().entity(ex.getMessage()).build(); + } + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index ba644abf9fe..9cd8f956edf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -79,7 +79,7 @@ public class TestEventFlow { YarnConfiguration conf = new YarnConfiguration(); Context context = new NMContext(new NMContainerTokenSecretManager(conf), - new NMTokenSecretManagerInNM()) { + new NMTokenSecretManagerInNM(), null, null) { @Override public int getHttpPort() { return 1234; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index d2119a75072..3fc5a2dddfb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -1185,7 +1185,7 @@ public class TestNodeStatusUpdater { public MyNMContext( NMContainerTokenSecretManager containerTokenSecretManager, NMTokenSecretManagerInNM nmTokenSecretManager) { - super(containerTokenSecretManager, nmTokenSecretManager); + super(containerTokenSecretManager, 
nmTokenSecretManager, null, null); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index f49f1189736..b33a58769c2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -100,7 +100,7 @@ public abstract class BaseContainerManagerTest { protected static final int HTTP_PORT = 5412; protected Configuration conf = new YarnConfiguration(); protected Context context = new NMContext(new NMContainerTokenSecretManager( - conf), new NMTokenSecretManagerInNM()) { + conf), new NMTokenSecretManagerInNM(), null, new ApplicationACLsManager(conf)) { public int getHttpPort() { return HTTP_PORT; }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java index 63d90c94d0c..429ad454897 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/TestApplication.java @@ -490,6 +490,8 @@ public class TestApplication { when(context.getContainerTokenSecretManager()).thenReturn( new NMContainerTokenSecretManager(conf)); + when(context.getApplicationACLsManager()).thenReturn( + new ApplicationACLsManager(conf)); // Setting master key MasterKey masterKey = new MasterKeyPBImpl(); @@ -501,8 +503,7 @@ public class TestApplication { this.user = user; this.appId = BuilderUtils.newApplicationId(timestamp, id); - app = new ApplicationImpl(dispatcher, new ApplicationACLsManager( - new Configuration()), this.user, appId, null, context); + app = new ApplicationImpl(dispatcher, this.user, appId, null, context); containers = new ArrayList(); for (int i = 0; i < numContainers; i++) { Container container = createMockedContainer(this.appId, i); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java index d5e41a3fd49..edd7cfd6610 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestContainerLogsPage.java @@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.Context; @@ -50,7 +51,6 @@ import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.webapp.ContainerLogsPage.ContainersLogsBlock; -import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.webapp.YarnWebParams; import org.apache.hadoop.yarn.webapp.test.WebAppTests; @@ -63,7 +63,7 @@ import com.google.inject.Module; public class TestContainerLogsPage { @Test(timeout=30000) - public void testContainerLogDirs() throws IOException { + public void testContainerLogDirs() throws IOException, YarnException { File absLogDir = new File("target", TestNMWebServer.class.getSimpleName() + "LogDir").getAbsoluteFile(); String logdirwithFile = absLogDir.toURI().toString(); @@ -86,7 +86,7 @@ public class TestContainerLogsPage { ContainerId container1 = BuilderUtils.newContainerId(recordFactory, appId, appAttemptId, 0); List files = null; - files = ContainerLogsPage.ContainersLogsBlock.getContainerLogDirs( + files = ContainerLogsUtils.getContainerLogDirs( container1, dirsHandler); Assert.assertTrue(!(files.get(0).toString().contains("file:"))); } @@ -146,8 +146,6 @@ public class TestContainerLogsPage { out.write("Log file Content".getBytes()); out.close(); - ApplicationACLsManager aclsManager = mock(ApplicationACLsManager.class); - Context context = mock(Context.class); ConcurrentMap appMap = new ConcurrentHashMap(); @@ -157,7 +155,7 @@ public class TestContainerLogsPage { new ConcurrentHashMap()); ContainersLogsBlock cLogsBlock = - new ContainersLogsBlock(conf, context, aclsManager, dirsHandler); + new ContainersLogsBlock(context); Map params = new HashMap(); params.put(YarnWebParams.CONTAINER_ID, container1.toString()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java index 7b4bcd37ade..eecf0397500 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java @@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import 
org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.Context; @@ -76,7 +77,7 @@ public class TestNMWebServer { } private int startNMWebAppServer(String webAddr) { - Context nmContext = new NodeManager.NMContext(null, null); + Context nmContext = new NodeManager.NMContext(null, null, null, null); ResourceView resourceView = new ResourceView() { @Override public long getVmemAllocatedForContainers() { @@ -133,8 +134,8 @@ public class TestNMWebServer { } @Test - public void testNMWebApp() throws IOException { - Context nmContext = new NodeManager.NMContext(null, null); + public void testNMWebApp() throws IOException, YarnException { + Context nmContext = new NodeManager.NMContext(null, null, null, null); ResourceView resourceView = new ResourceView() { @Override public long getVmemAllocatedForContainers() { @@ -219,10 +220,10 @@ public class TestNMWebServer { private void writeContainerLogs(Context nmContext, ContainerId containerId, LocalDirsHandlerService dirsHandler) - throws IOException { + throws IOException, YarnException { // ContainerLogDir should be created File containerLogDir = - ContainerLogsPage.ContainersLogsBlock.getContainerLogDirs(containerId, + ContainerLogsUtils.getContainerLogDirs(containerId, dirsHandler).get(0); containerLogDir.mkdirs(); for (String fileType : new String[] { "stdout", "stderr", "syslog" }) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java index 36e1e35b3fe..fe9b09dafc4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServices.java @@ -23,24 +23,38 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import java.io.File; +import java.io.IOException; +import java.io.PrintWriter; import java.io.StringReader; import javax.ws.rs.core.MediaType; import javax.xml.parsers.DocumentBuilder; import javax.xml.parsers.DocumentBuilderFactory; +import junit.framework.Assert; + import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.util.VersionInfo; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; import org.apache.hadoop.yarn.server.nodemanager.NodeHealthCheckerService; import org.apache.hadoop.yarn.server.nodemanager.NodeManager; import org.apache.hadoop.yarn.server.nodemanager.ResourceView; +import 
org.apache.hadoop.yarn.server.nodemanager.containermanager.application.ApplicationImpl; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch; import org.apache.hadoop.yarn.server.nodemanager.webapp.WebServer.NMWebApp; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.apache.hadoop.yarn.webapp.GenericExceptionHandler; import org.apache.hadoop.yarn.webapp.WebApp; @@ -86,7 +100,14 @@ public class TestNMWebServices extends JerseyTest { private Injector injector = Guice.createInjector(new ServletModule() { @Override protected void configureServlets() { - nmContext = new NodeManager.NMContext(null, null); + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath()); + conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath()); + NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(); + healthChecker.init(conf); + dirsHandler = healthChecker.getDiskHandler(); + aclsManager = new ApplicationACLsManager(conf); + nmContext = new NodeManager.NMContext(null, null, dirsHandler, aclsManager); NodeId nodeId = NodeId.newInstance("testhost.foo.com", 8042); ((NodeManager.NMContext)nmContext).setNodeId(nodeId); resourceView = new ResourceView() { @@ -110,13 +131,6 @@ public class TestNMWebServices extends JerseyTest { return true; } }; - Configuration conf = new Configuration(); - conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath()); - conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath()); - NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(); - healthChecker.init(conf); - dirsHandler = healthChecker.getDiskHandler(); - aclsManager = new ApplicationACLsManager(conf); nmWebApp = new NMWebApp(resourceView, aclsManager, dirsHandler); bind(JAXBContextResolver.class); bind(NMWebServices.class); @@ -292,6 +306,53 @@ public class TestNMWebServices extends JerseyTest { assertEquals("incorrect number of elements", 1, nodes.getLength()); verifyNodesXML(nodes); } + + @Test + public void testContainerLogs() throws IOException { + WebResource r = resource(); + final ContainerId containerId = BuilderUtils.newContainerId(0, 0, 0, 0); + final String containerIdStr = BuilderUtils.newContainerId(0, 0, 0, 0) + .toString(); + final ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId(); + final ApplicationId appId = appAttemptId.getApplicationId(); + final String appIdStr = appId.toString(); + final String filename = "logfile1"; + final String logMessage = "log message\n"; + nmContext.getApplications().put(appId, new ApplicationImpl(null, "user", + appId, null, nmContext)); + + MockContainer container = new MockContainer(appAttemptId, + new AsyncDispatcher(), new Configuration(), "user", appId, 1); + container.setState(ContainerState.RUNNING); + nmContext.getContainers().put(containerId, container); + + // write out log file + Path path = dirsHandler.getLogPathForWrite( + ContainerLaunch.getRelativeContainerLogDir( + appIdStr, containerIdStr) + "/" + filename, false); + + File logFile = new File(path.toUri().getPath()); + logFile.deleteOnExit(); + assertTrue("Failed to create log dir", 
logFile.getParentFile().mkdirs()); + PrintWriter pw = new PrintWriter(logFile); + pw.print(logMessage); + pw.close(); + + // ask for it + ClientResponse response = r.path("ws").path("v1").path("node") + .path("containerlogs").path(containerIdStr).path(filename) + .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class); + String responseText = response.getEntity(String.class); + assertEquals(logMessage, responseText); + + // ask for file that doesn't exist + response = r.path("ws").path("v1").path("node") + .path("containerlogs").path(containerIdStr).path("uhhh") + .accept(MediaType.TEXT_PLAIN).get(ClientResponse.class); + Assert.assertEquals(Status.NOT_FOUND.getStatusCode(), response.getStatus()); + responseText = response.getEntity(String.class); + assertTrue(responseText.contains("Cannot find this log on the local disk.")); + } public void verifyNodesXML(NodeList nodes) throws JSONException, Exception { for (int i = 0; i < nodes.getLength(); i++) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java index d60d5838927..72c1f6f2c56 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java @@ -93,7 +93,13 @@ public class TestNMWebServicesApps extends JerseyTest { private Injector injector = Guice.createInjector(new ServletModule() { @Override protected void configureServlets() { - nmContext = new NodeManager.NMContext(null, null); + conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath()); + conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath()); + NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(); + healthChecker.init(conf); + dirsHandler = healthChecker.getDiskHandler(); + aclsManager = new ApplicationACLsManager(conf); + nmContext = new NodeManager.NMContext(null, null, dirsHandler, aclsManager); NodeId nodeId = NodeId.newInstance("testhost.foo.com", 9999); ((NodeManager.NMContext)nmContext).setNodeId(nodeId); resourceView = new ResourceView() { @@ -119,12 +125,6 @@ public class TestNMWebServicesApps extends JerseyTest { return true; } }; - conf.set(YarnConfiguration.NM_LOCAL_DIRS, testRootDir.getAbsolutePath()); - conf.set(YarnConfiguration.NM_LOG_DIRS, testLogDir.getAbsolutePath()); - NodeHealthCheckerService healthChecker = new NodeHealthCheckerService(); - healthChecker.init(conf); - dirsHandler = healthChecker.getDiskHandler(); - aclsManager = new ApplicationACLsManager(conf); nmWebApp = new NMWebApp(resourceView, aclsManager, dirsHandler); bind(JAXBContextResolver.class); bind(NMWebServices.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java index 95016c250dd..29c92534d47 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java @@ -93,15 +93,6 @@ public class TestNMWebServicesContainers extends JerseyTest { private Injector injector = Guice.createInjector(new ServletModule() { @Override protected void configureServlets() { - nmContext = new NodeManager.NMContext(null, null) { - public NodeId getNodeId() { - return NodeId.newInstance("testhost.foo.com", 8042); - }; - - public int getHttpPort() { - return 1234; - }; - }; resourceView = new ResourceView() { @Override public long getVmemAllocatedForContainers() { @@ -131,6 +122,15 @@ public class TestNMWebServicesContainers extends JerseyTest { healthChecker.init(conf); dirsHandler = healthChecker.getDiskHandler(); aclsManager = new ApplicationACLsManager(conf); + nmContext = new NodeManager.NMContext(null, null, dirsHandler, aclsManager) { + public NodeId getNodeId() { + return NodeId.newInstance("testhost.foo.com", 8042); + }; + + public int getHttpPort() { + return 1234; + }; + }; nmWebApp = new NMWebApp(resourceView, aclsManager, dirsHandler); bind(JAXBContextResolver.class); bind(NMWebServices.class); From 66841c29dbd39c9cfb4cd8ee3563276d9ee43262 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 2 Sep 2013 02:31:27 +0000 Subject: [PATCH 109/153] YARN-1120. Made ApplicationConstants.Environment.USER definition OS neutral as the corresponding value is now set correctly end-to-end. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519330 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 ++++ .../java/org/apache/hadoop/yarn/api/ApplicationConstants.java | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 393a4f59145..18006408a9d 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -78,6 +78,10 @@ Release 2.1.1-beta - UNRELEASED YARN-1117. Improved help messages for "yarn application" and "yarn node" commands. (Xuan Gong via vinodkv) + YARN-1120. Made ApplicationConstants.Environment.USER definition OS neutral + as the corresponding value is now set correctly end-to-end. (Chuan Liu via + vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java index ccf1f37c620..f2e5138a009 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java @@ -80,7 +80,7 @@ public interface ApplicationConstants { * $USER * Final, non-modifiable. */ - USER(Shell.WINDOWS ? "USERNAME": "USER"), + USER("USER"), /** * $LOGNAME From ff69557040fb414254e75bf57287aee1dc7ad855 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 2 Sep 2013 03:10:39 +0000 Subject: [PATCH 110/153] YARN-1077. Fixed TestContainerLaunch test failure on Windows. Contributed by Chuan Liu. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519333 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ .../launcher/ContainerLaunch.java | 3 ++- .../launcher/ContainersLauncher.java | 4 +++- .../launcher/TestContainerLaunch.java | 18 ++++++++---------- 4 files changed, 16 insertions(+), 12 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 18006408a9d..a66c7b25f00 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -142,6 +142,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1101. Active nodes can be decremented below 0 (Robert Parker via tgraves) + YARN-1077. Fixed TestContainerLaunch test failure on Windows. (Chuan Liu via + vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index 89812d27efc..58a1be5bf13 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -525,7 +525,8 @@ public class ContainerLaunch implements Callable { @Override public void env(String key, String value) { - line("@set ", key, "=", value); + line("@set ", key, "=", value, + "\nif %errorlevel% neq 0 exit /b %errorlevel%"); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java index 03ddb5641cf..643b29052b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java @@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.UnsupportedFileSystemException; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; @@ -149,7 +150,8 @@ public class ContainersLauncher extends AbstractService dispatcher.getEventHandler().handle( new ContainerExitEvent(containerId, ContainerEventType.CONTAINER_KILLED_ON_REQUEST, - ExitCode.TERMINATED.getExitCode(), + Shell.WINDOWS ? 
ExitCode.FORCE_KILLED.getExitCode() : + ExitCode.TERMINATED.getExitCode(), "Container terminated before launch.")); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index d25c1c4039b..9842ffc28b0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -240,15 +240,10 @@ public class TestContainerLaunch extends BaseContainerManagerTest { File shellFile = null; try { shellFile = Shell.appendScriptExtension(tmpDir, "hello"); - String timeoutCommand = Shell.WINDOWS ? "@echo \"hello\"" : - "echo \"hello\""; - PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile)); - FileUtil.setExecutable(shellFile, true); - writer.println(timeoutCommand); - writer.close(); Map> resources = new HashMap>(); FileOutputStream fos = new FileOutputStream(shellFile); + FileUtil.setExecutable(shellFile, true); Map env = new HashMap(); // invalid env @@ -270,7 +265,9 @@ public class TestContainerLaunch extends BaseContainerManagerTest { } catch(ExitCodeException e){ diagnostics = e.getMessage(); } - Assert.assertTrue(diagnostics.contains("command not found")); + Assert.assertTrue(diagnostics.contains(Shell.WINDOWS ? + "is not recognized as an internal or external command" : + "command not found")); Assert.assertTrue(shexc.getExitCode() != 0); } finally { @@ -289,15 +286,16 @@ public class TestContainerLaunch extends BaseContainerManagerTest { try { shellFile = Shell.appendScriptExtension(tmpDir, "hello"); // echo "hello" to stdout and "error" to stderr and exit code with 2; - String command = Shell.WINDOWS ? "@echo \"hello\"; @echo \"error\" 1>&2; exit 2;" : - "echo \"hello\"; echo \"error\" 1>&2; exit 2;"; + String command = Shell.WINDOWS ? + "@echo \"hello\" & @echo \"error\" 1>&2 & exit /b 2" : + "echo \"hello\"; echo \"error\" 1>&2; exit 2;"; PrintWriter writer = new PrintWriter(new FileOutputStream(shellFile)); FileUtil.setExecutable(shellFile, true); writer.println(command); writer.close(); Map> resources = new HashMap>(); - FileOutputStream fos = new FileOutputStream(shellFile); + FileOutputStream fos = new FileOutputStream(shellFile, true); Map env = new HashMap(); List commands = new ArrayList(); From c28c96461210294d4f98b39cf24e18dde1428e2c Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Tue, 3 Sep 2013 13:49:25 +0000 Subject: [PATCH 111/153] HDFS-5150. Allow per NN SPN for internal SPNEGO. Contributed By Kihwal Lee. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519681 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 ++ .../java/org/apache/hadoop/hdfs/server/namenode/NameNode.java | 1 + 2 files changed, 3 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4d7116f4faa..f723ad463b5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -348,6 +348,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5128. Allow multiple net interfaces to be used with HA namenode RPC server. (kihwal) + HDFS-5150. Allow per NN SPN for internal SPNEGO. (kihwal) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java index d2ef66974dc..4e1b9cd77b3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java @@ -182,6 +182,7 @@ public class NameNode implements NameNodeStatusMXBean { DFS_NAMENODE_BACKUP_HTTP_ADDRESS_KEY, DFS_NAMENODE_BACKUP_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_USER_NAME_KEY, + DFS_NAMENODE_INTERNAL_SPNEGO_USER_NAME_KEY, DFS_HA_FENCE_METHODS_KEY, DFS_HA_ZKFC_PORT_KEY, DFS_HA_FENCE_METHODS_KEY From 2d525510b42457dd4b519a0d4e49cac4ffe48a6f Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Tue, 3 Sep 2013 22:18:46 +0000 Subject: [PATCH 112/153] YARN-1124. Modified YARN CLI application list to display new and submitted applications together with running apps by default, following up YARN-1074. Contributed by Xuan Gong. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519869 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 ++ .../yarn/client/cli/ApplicationCLI.java | 2 + .../hadoop/yarn/client/cli/TestYarnCLI.java | 51 +++++++++++++++++-- 3 files changed, 53 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index a66c7b25f00..8b25295bfd5 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -82,6 +82,10 @@ Release 2.1.1-beta - UNRELEASED as the corresponding value is now set correctly end-to-end. (Chuan Liu via vinodkv) + YARN-1124. Modified YARN CLI application list to display new and submitted + applications together with running apps by default, following up YARN-1074. 
+ (Xuan Gong via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java index 272c0bf6044..a7b7d654643 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/ApplicationCLI.java @@ -194,6 +194,8 @@ public class ApplicationCLI extends YarnCLI { } else { if (appStates.isEmpty()) { appStates.add(YarnApplicationState.RUNNING); + appStates.add(YarnApplicationState.ACCEPTED); + appStates.add(YarnApplicationState.SUBMITTED); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 670ecbc674d..778fd36dc47 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -169,15 +169,35 @@ public class TestYarnCLI { null); applicationReports.add(newApplicationReport4); + ApplicationId applicationId5 = ApplicationId.newInstance(1234, 9); + ApplicationReport newApplicationReport5 = ApplicationReport.newInstance( + applicationId5, ApplicationAttemptId.newInstance(applicationId5, 5), + "user5", "queue5", "appname5", "host5", 128, null, + YarnApplicationState.ACCEPTED, "diagnostics5", "url5", 5, 5, + FinalApplicationStatus.KILLED, null, "N/A", 0.93789f, "HIVE", + null); + applicationReports.add(newApplicationReport5); + + ApplicationId applicationId6 = ApplicationId.newInstance(1234, 10); + ApplicationReport newApplicationReport6 = ApplicationReport.newInstance( + applicationId6, ApplicationAttemptId.newInstance(applicationId6, 6), + "user6", "queue6", "appname6", "host6", 129, null, + YarnApplicationState.SUBMITTED, "diagnostics6", "url6", 6, 6, + FinalApplicationStatus.KILLED, null, "N/A", 0.99789f, "PIG", + null); + applicationReports.add(newApplicationReport6); + // Test command yarn application -list // if the set appStates is empty, RUNNING state will be automatically added // to the appStates list // the output of yarn application -list should be the same as - // equals to yarn application -list --appStates RUNNING + // equals to yarn application -list --appStates RUNNING,ACCEPTED,SUBMITTED Set appType1 = new HashSet(); EnumSet appState1 = EnumSet.noneOf(YarnApplicationState.class); appState1.add(YarnApplicationState.RUNNING); + appState1.add(YarnApplicationState.ACCEPTED); + appState1.add(YarnApplicationState.SUBMITTED); when(client.getApplications(appType1, appState1)).thenReturn( getApplicationReports(applicationReports, appType1, appState1, false)); int result = cli.run(new String[] { "-list" }); @@ -187,7 +207,7 @@ public class TestYarnCLI { ByteArrayOutputStream baos = new ByteArrayOutputStream(); PrintWriter pw = new PrintWriter(baos); pw.println("Total number of applications (application-types: " + appType1 - + " and states: " + appState1 + ")" + ":" + 2); + + " and states: " + appState1 + ")" + ":" + 4); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t 
User\t Queue\t State\t "); @@ -203,6 +223,16 @@ public class TestYarnCLI { pw.print("queue3\t RUNNING\t "); pw.print("SUCCEEDED\t 73.79%"); pw.println("\t N/A"); + pw.print(" application_1234_0009\t "); + pw.print("appname5\t HIVE\t user5\t "); + pw.print("queue5\t ACCEPTED\t "); + pw.print("KILLED\t 93.79%"); + pw.println("\t N/A"); + pw.print(" application_1234_0010\t "); + pw.print("appname6\t PIG\t user6\t "); + pw.print("queue6\t SUBMITTED\t "); + pw.print("KILLED\t 99.79%"); + pw.println("\t N/A"); pw.close(); String appsReportStr = baos.toString("UTF-8"); Assert.assertEquals(appsReportStr, sysOutStream.toString()); @@ -210,7 +240,8 @@ public class TestYarnCLI { //Test command yarn application -list --appTypes apptype1,apptype2 //the output should be the same as - //yarn application -list --appTypes apptyp1, apptype2 --appStates RUNNING + // yarn application -list --appTypes apptyp1, apptype2 --appStates + // RUNNING,ACCEPTED,SUBMITTED sysOutStream.reset(); Set appType2 = new HashSet(); appType2.add("YARN"); @@ -219,6 +250,8 @@ public class TestYarnCLI { EnumSet appState2 = EnumSet.noneOf(YarnApplicationState.class); appState2.add(YarnApplicationState.RUNNING); + appState2.add(YarnApplicationState.ACCEPTED); + appState2.add(YarnApplicationState.SUBMITTED); when(client.getApplications(appType2, appState2)).thenReturn( getApplicationReports(applicationReports, appType2, appState2, false)); result = @@ -360,7 +393,7 @@ public class TestYarnCLI { baos = new ByteArrayOutputStream(); pw = new PrintWriter(baos); pw.println("Total number of applications (application-types: " + appType5 - + " and states: " + appState5 + ")" + ":" + 4); + + " and states: " + appState5 + ")" + ":" + 6); pw.print(" Application-Id\t Application-Name"); pw.print("\t Application-Type"); pw.print("\t User\t Queue\t State\t "); @@ -386,6 +419,16 @@ public class TestYarnCLI { pw.print("queue4\t FAILED\t "); pw.print("SUCCEEDED\t 83.79%"); pw.println("\t N/A"); + pw.print(" application_1234_0009\t "); + pw.print("appname5\t HIVE\t user5\t "); + pw.print("queue5\t ACCEPTED\t "); + pw.print("KILLED\t 93.79%"); + pw.println("\t N/A"); + pw.print(" application_1234_0010\t "); + pw.print("appname6\t PIG\t user6\t "); + pw.print("queue6\t SUBMITTED\t "); + pw.print("KILLED\t 99.79%"); + pw.println("\t N/A"); pw.close(); appsReportStr = baos.toString("UTF-8"); Assert.assertEquals(appsReportStr, sysOutStream.toString()); From b87bcbb82dac4f0a0ca7e436998ed0a3d1b4bb21 Mon Sep 17 00:00:00 2001 From: Ivan Mitic Date: Wed, 4 Sep 2013 01:16:04 +0000 Subject: [PATCH 113/153] HADOOP-9924. FileUtil.createJarWithClassPath() does not generate relative classpath correctly. Contributed by Shanyu Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519891 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../java/org/apache/hadoop/fs/FileUtil.java | 9 +++++++- .../org/apache/hadoop/fs/TestFileUtil.java | 23 +++++++++++++------ 3 files changed, 27 insertions(+), 8 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 025aaf1dab6..282bc4303f2 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -444,6 +444,9 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9774. RawLocalFileSystem.listStatus() return absolute paths when input path is relative on Windows. (Shanyu Zhao via ivanmi) + HADOOP-9924. 
FileUtil.createJarWithClassPath() does not generate relative + classpath correctly. (Shanyu Zhao via ivanmi) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index 4ef4fb2428b..bb203422f39 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -1252,7 +1252,14 @@ public class FileUtil { } } else { // Append just this entry - String classPathEntryUrl = new File(classPathEntry).toURI().toURL() + File fileCpEntry = null; + if(!new Path(classPathEntry).isAbsolute()) { + fileCpEntry = new File(workingDir, classPathEntry); + } + else { + fileCpEntry = new File(classPathEntry); + } + String classPathEntryUrl = fileCpEntry.toURI().toURL() .toExternalForm(); // File.toURI only appends trailing '/' if it can determine that it is a diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java index a9646d33b39..3877e83a9b2 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java @@ -782,14 +782,23 @@ public class TestFileUtil { expectedClassPaths.add(wildcardMatch.toURI().toURL() .toExternalForm()); } - } else if (nonExistentSubdir.equals(classPath)) { - // expect to maintain trailing path separator if present in input, even - // if directory doesn't exist yet - expectedClassPaths.add(new File(classPath).toURI().toURL() - .toExternalForm() + Path.SEPARATOR); } else { - expectedClassPaths.add(new File(classPath).toURI().toURL() - .toExternalForm()); + File fileCp = null; + if(!new Path(classPath).isAbsolute()) { + fileCp = new File(tmp, classPath); + } + else { + fileCp = new File(classPath); + } + if (nonExistentSubdir.equals(classPath)) { + // expect to maintain trailing path separator if present in input, even + // if directory doesn't exist yet + expectedClassPaths.add(fileCp.toURI().toURL() + .toExternalForm() + Path.SEPARATOR); + } else { + expectedClassPaths.add(fileCp.toURI().toURL() + .toExternalForm()); + } } } List actualClassPaths = Arrays.asList(classPathAttr.split(" ")); From 0e0271b5fdf55c55b825e85c56639a4ae7277a39 Mon Sep 17 00:00:00 2001 From: Luke Lu Date: Wed, 4 Sep 2013 10:34:28 +0000 Subject: [PATCH 114/153] HADOOP-9916. Fix race in ipc.Client retry. (Binglin Chang via llu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1519973 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../java/org/apache/hadoop/ipc/Client.java | 6 +- .../java/org/apache/hadoop/ipc/TestIPC.java | 60 ++++++++++--------- 3 files changed, 36 insertions(+), 32 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 282bc4303f2..4475f044f90 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -406,6 +406,8 @@ Release 2.1.1-beta - UNRELEASED BUG FIXES + HADOOP-9916. Fix race in ipc.Client retry. (Binglin Chang via llu) + HADOOP-9768. chown and chgrp reject users and groups with spaces on platforms where spaces are otherwise acceptable. 
(cnauroth) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index ae30dd3eb3d..8caa7b288ba 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -1063,8 +1063,8 @@ public class Client { if (status == RpcStatusProto.SUCCESS) { Writable value = ReflectionUtils.newInstance(valueClass, conf); value.readFields(in); // read value - call.setRpcResponse(value); calls.remove(callId); + call.setRpcResponse(value); // verify that length was correct // only for ProtobufEngine where len can be verified easily @@ -1098,8 +1098,8 @@ public class Client { new RemoteException(exceptionClassName, errorMsg) : new RemoteException(exceptionClassName, errorMsg, erCode)); if (status == RpcStatusProto.ERROR) { - call.setException(re); calls.remove(callId); + call.setException(re); } else if (status == RpcStatusProto.FATAL) { // Close the connection markClosed(re); @@ -1166,8 +1166,8 @@ public class Client { Iterator> itor = calls.entrySet().iterator() ; while (itor.hasNext()) { Call c = itor.next().getValue(); + itor.remove(); c.setException(closeException); // local exception - itor.remove(); } } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index 75f051aaedf..33fb799c0c8 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -216,13 +216,13 @@ public class TestIPC { } } - @Test + @Test(timeout=60000) public void testSerial() throws IOException, InterruptedException { - testSerial(3, false, 2, 5, 100); - testSerial(3, true, 2, 5, 10); + internalTestSerial(3, false, 2, 5, 100); + internalTestSerial(3, true, 2, 5, 10); } - public void testSerial(int handlerCount, boolean handlerSleep, + public void internalTestSerial(int handlerCount, boolean handlerSleep, int clientCount, int callerCount, int callCount) throws IOException, InterruptedException { Server server = new TestServer(handlerCount, handlerSleep); @@ -249,7 +249,7 @@ public class TestIPC { server.stop(); } - @Test + @Test(timeout=60000) public void testStandAloneClient() throws IOException { Client client = new Client(LongWritable.class, conf); InetSocketAddress address = new InetSocketAddress("127.0.0.1", 10); @@ -383,7 +383,7 @@ public class TestIPC { } } - @Test + @Test(timeout=60000) public void testIOEOnClientWriteParam() throws Exception { doErrorTest(IOEOnWriteWritable.class, LongWritable.class, @@ -391,7 +391,7 @@ public class TestIPC { LongWritable.class); } - @Test + @Test(timeout=60000) public void testRTEOnClientWriteParam() throws Exception { doErrorTest(RTEOnWriteWritable.class, LongWritable.class, @@ -399,7 +399,7 @@ public class TestIPC { LongWritable.class); } - @Test + @Test(timeout=60000) public void testIOEOnServerReadParam() throws Exception { doErrorTest(LongWritable.class, IOEOnReadWritable.class, @@ -407,7 +407,7 @@ public class TestIPC { LongWritable.class); } - @Test + @Test(timeout=60000) public void testRTEOnServerReadParam() throws Exception { doErrorTest(LongWritable.class, RTEOnReadWritable.class, @@ -416,7 +416,7 @@ public class TestIPC { } - @Test + @Test(timeout=60000) public void 
testIOEOnServerWriteResponse() throws Exception { doErrorTest(LongWritable.class, LongWritable.class, @@ -424,7 +424,7 @@ public class TestIPC { LongWritable.class); } - @Test + @Test(timeout=60000) public void testRTEOnServerWriteResponse() throws Exception { doErrorTest(LongWritable.class, LongWritable.class, @@ -432,7 +432,7 @@ public class TestIPC { LongWritable.class); } - @Test + @Test(timeout=60000) public void testIOEOnClientReadResponse() throws Exception { doErrorTest(LongWritable.class, LongWritable.class, @@ -440,7 +440,7 @@ public class TestIPC { IOEOnReadWritable.class); } - @Test + @Test(timeout=60000) public void testRTEOnClientReadResponse() throws Exception { doErrorTest(LongWritable.class, LongWritable.class, @@ -453,7 +453,7 @@ public class TestIPC { * that a ping should have been sent. This is a reproducer for a * deadlock seen in one iteration of HADOOP-6762. */ - @Test + @Test(timeout=60000) public void testIOEOnWriteAfterPingClient() throws Exception { // start server Client.setPingInterval(conf, 100); @@ -481,7 +481,7 @@ public class TestIPC { * Test that, if the socket factory throws an IOE, it properly propagates * to the client. */ - @Test + @Test(timeout=60000) public void testSocketFactoryException() throws IOException { SocketFactory mockFactory = mock(SocketFactory.class); doThrow(new IOException("Injected fault")).when(mockFactory).createSocket(); @@ -503,7 +503,7 @@ public class TestIPC { * failure is handled properly. This is a regression test for * HADOOP-7428. */ - @Test + @Test(timeout=60000) public void testRTEDuringConnectionSetup() throws IOException { // Set up a socket factory which returns sockets which // throw an RTE when setSoTimeout is called. @@ -544,7 +544,7 @@ public class TestIPC { } } - @Test + @Test(timeout=60000) public void testIpcTimeout() throws IOException { // start server Server server = new TestServer(1, true); @@ -566,7 +566,7 @@ public class TestIPC { addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf); } - @Test + @Test(timeout=60000) public void testIpcConnectTimeout() throws IOException { // start server Server server = new TestServer(1, true); @@ -670,31 +670,31 @@ public class TestIPC { return FD_DIR.list().length; } - @Test + @Test(timeout=60000) public void testIpcFromHadoop_0_18_13() throws IOException { doIpcVersionTest(NetworkTraces.HADOOP_0_18_3_RPC_DUMP, NetworkTraces.RESPONSE_TO_HADOOP_0_18_3_RPC); } - @Test + @Test(timeout=60000) public void testIpcFromHadoop0_20_3() throws IOException { doIpcVersionTest(NetworkTraces.HADOOP_0_20_3_RPC_DUMP, NetworkTraces.RESPONSE_TO_HADOOP_0_20_3_RPC); } - @Test + @Test(timeout=60000) public void testIpcFromHadoop0_21_0() throws IOException { doIpcVersionTest(NetworkTraces.HADOOP_0_21_0_RPC_DUMP, NetworkTraces.RESPONSE_TO_HADOOP_0_21_0_RPC); } - @Test + @Test(timeout=60000) public void testHttpGetResponse() throws IOException { doIpcVersionTest("GET / HTTP/1.0\r\n\r\n".getBytes(), Server.RECEIVED_HTTP_REQ_RESPONSE.getBytes()); } - @Test + @Test(timeout=60000) public void testConnectionRetriesOnSocketTimeoutExceptions() throws IOException { Configuration conf = new Configuration(); // set max retries to 0 @@ -720,7 +720,7 @@ public class TestIPC { * (1) the rpc server uses the call id/retry provided by the rpc client, and * (2) the rpc client receives the same call id/retry from the rpc server. 
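A minimal stand-alone sketch of the ordering discipline behind the HADOOP-9916 change above (calls.remove(callId) now runs before the call is completed, and cleanupCalls removes the entry before setting the close exception). The PendingTable class and its method names below are invented for illustration and are not Hadoop's ipc.Client; the point is only that the shared table is cleaned up before the waiting caller, which may retry immediately, is woken.

import java.util.Map;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;

// Illustrative only: a pending-call table keyed by call id.
public class PendingTable {
  private final Map<Integer, CompletableFuture<String>> pending = new ConcurrentHashMap<>();

  public CompletableFuture<String> register(int callId) {
    CompletableFuture<String> f = new CompletableFuture<>();
    pending.put(callId, f);
    return f;
  }

  // Remove first, then complete: complete() wakes the waiting caller, which may
  // retry at once, so the table must never still hold the finished call when
  // that retry runs.
  public void finish(int callId, String value) {
    CompletableFuture<String> f = pending.remove(callId);
    if (f != null) {
      f.complete(value);
    }
  }
}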
*/ - @Test + @Test(timeout=60000) public void testCallIdAndRetry() throws IOException { final CallInfo info = new CallInfo(); @@ -772,7 +772,7 @@ public class TestIPC { /** * Test the retry count while used in a retry proxy. */ - @Test + @Test(timeout=60000) public void testRetryProxy() throws IOException { final Client client = new Client(LongWritable.class, conf); @@ -785,7 +785,9 @@ public class TestIPC { } }; - final int totalRetry = 256; + // try more times, so it is easier to find race condition bug + // 10000 times runs about 6s on a core i7 machine + final int totalRetry = 10000; DummyProtocol proxy = (DummyProtocol) Proxy.newProxyInstance( DummyProtocol.class.getClassLoader(), new Class[] { DummyProtocol.class }, new TestInvocationHandler(client, @@ -807,7 +809,7 @@ public class TestIPC { /** * Test if the rpc server gets the default retry count (0) from client. */ - @Test + @Test(timeout=60000) public void testInitialCallRetryCount() throws IOException { // Override client to store the call id final Client client = new Client(LongWritable.class, conf); @@ -838,7 +840,7 @@ public class TestIPC { /** * Test if the rpc server gets the retry count from client. */ - @Test + @Test(timeout=60000) public void testCallRetryCount() throws IOException { final int retryCount = 255; // Override client to store the call id @@ -873,7 +875,7 @@ public class TestIPC { * even if multiple threads are using the same client. * @throws InterruptedException */ - @Test + @Test(timeout=60000) public void testUniqueSequentialCallIds() throws IOException, InterruptedException { int serverThreads = 10, callerCount = 100, perCallerCallCount = 100; From cd663baf162665904b36ddb9cb14017eb2469d9f Mon Sep 17 00:00:00 2001 From: Kihwal Lee Date: Wed, 4 Sep 2013 19:57:44 +0000 Subject: [PATCH 115/153] HADOOP-9932. Improper synchronization in RetryCache. Contributed by Kihwal Lee. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520126 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 ++ .../org/apache/hadoop/ipc/RetryCache.java | 28 ++++++++++++++----- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 4475f044f90..7311d5a1afe 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -449,6 +449,8 @@ Release 2.1.1-beta - UNRELEASED HADOOP-9924. FileUtil.createJarWithClassPath() does not generate relative classpath correctly. (Shanyu Zhao via ivanmi) + HADOOP-9932. Improper synchronization in RetryCache. (kihwal) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java index aeb7ac174ce..102203bee2b 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RetryCache.java @@ -76,6 +76,12 @@ public class RetryCache { this.expirationTime = expirationTime; } + CacheEntry(byte[] clientId, int callId, long expirationTime, + boolean success) { + this(clientId, callId, expirationTime); + this.state = success ? 
SUCCESS : FAILED; + } + private static int hashCode(long value) { return (int)(value ^ (value >>> 32)); } @@ -147,6 +153,12 @@ public class RetryCache { this.payload = payload; } + CacheEntryWithPayload(byte[] clientId, int callId, Object payload, + long expirationTime, boolean success) { + super(clientId, callId, expirationTime, success); + this.payload = payload; + } + /** Override equals to avoid findbugs warnings */ @Override public boolean equals(Object obj) { @@ -253,18 +265,20 @@ public class RetryCache { */ public void addCacheEntry(byte[] clientId, int callId) { CacheEntry newEntry = new CacheEntry(clientId, callId, System.nanoTime() - + expirationTime); - newEntry.completed(true); - set.put(newEntry); + + expirationTime, true); + synchronized(this) { + set.put(newEntry); + } } public void addCacheEntryWithPayload(byte[] clientId, int callId, Object payload) { - CacheEntry newEntry = new CacheEntryWithPayload(clientId, callId, payload, - System.nanoTime() + expirationTime); // since the entry is loaded from editlog, we can assume it succeeded. - newEntry.completed(true); - set.put(newEntry); + CacheEntry newEntry = new CacheEntryWithPayload(clientId, callId, payload, + System.nanoTime() + expirationTime, true); + synchronized(this) { + set.put(newEntry); + } } private static CacheEntry newEntry(long expirationTime) { From 49afc64cd469bc3a775c18e0458661e39270b7a5 Mon Sep 17 00:00:00 2001 From: Bikas Saha Date: Wed, 4 Sep 2013 20:42:11 +0000 Subject: [PATCH 116/153] YARN-1065. NM should provide AuxillaryService data to the container (Xuan Gong via bikas) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520135 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../yarn/util/AuxiliaryServiceHelper.java | 48 +++++++++++++++++++ .../ContainerManagerImpl.java | 7 ++- .../launcher/ContainerLaunch.java | 15 +++++- .../launcher/ContainersLauncher.java | 8 +++- .../nodemanager/DummyContainerManager.java | 2 +- .../BaseContainerManagerTest.java | 13 +++++ .../container/TestContainer.java | 2 +- .../launcher/TestContainerLaunch.java | 35 +++++++++++++- 9 files changed, 124 insertions(+), 9 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 8b25295bfd5..0e4a5f23eeb 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -86,6 +86,9 @@ Release 2.1.1-beta - UNRELEASED applications together with running apps by default, following up YARN-1074. (Xuan Gong via vinodkv) + YARN-1065. NM should provide AuxillaryService data to the container (Xuan + Gong via bikas) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java new file mode 100644 index 00000000000..23fc50fcec8 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AuxiliaryServiceHelper.java @@ -0,0 +1,48 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. 
The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.util; + +import java.nio.ByteBuffer; +import java.util.Map; + +import org.apache.commons.codec.binary.Base64; + + +public class AuxiliaryServiceHelper { + + public final static String NM_AUX_SERVICE = "NM_AUX_SERVICE_"; + + public static ByteBuffer getServiceDataFromEnv(String serviceName, + Map env) { + byte[] metaData = + Base64.decodeBase64(env.get(getPrefixServiceName(serviceName))); + return ByteBuffer.wrap(metaData); + } + + public static void setServiceDataIntoEnv(String serviceName, + ByteBuffer metaData, Map env) { + byte[] byteData = metaData.array(); + env.put(getPrefixServiceName(serviceName), + Base64.encodeBase64String(byteData)); + } + + private static String getPrefixServiceName(String serviceName) { + return NM_AUX_SERVICE + serviceName; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index e2a949c1e38..0af4332cef8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -216,7 +216,7 @@ public class ContainerManagerImpl extends CompositeService implements protected ContainersLauncher createContainersLauncher(Context context, ContainerExecutor exec) { - return new ContainersLauncher(context, this.dispatcher, exec, dirsHandler); + return new ContainersLauncher(context, this.dispatcher, exec, dirsHandler, this); } @Override @@ -410,7 +410,7 @@ public class ContainerManagerImpl extends CompositeService implements } } - return StartContainersResponse.newInstance(auxiliaryServices.getMetaData(), + return StartContainersResponse.newInstance(getAuxServiceMetaData(), succeededContainers, failedContainers); } @@ -759,4 +759,7 @@ public class ContainerManagerImpl extends CompositeService implements return this.context; } + public Map getAuxServiceMetaData() { + return this.auxiliaryServices.getMetaData(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index 58a1be5bf13..1bff008541c 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -26,6 +26,7 @@ import java.io.File; import java.io.IOException; import java.io.OutputStream; import java.io.PrintStream; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.EnumSet; import java.util.HashMap; @@ -60,6 +61,7 @@ import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerDiagnosticsUpdateEvent; @@ -70,6 +72,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.Cont import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService; import org.apache.hadoop.yarn.server.nodemanager.util.ProcessIdFileReader; import org.apache.hadoop.yarn.util.Apps; +import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper; import org.apache.hadoop.yarn.util.ConverterUtils; public class ContainerLaunch implements Callable { @@ -88,6 +91,7 @@ public class ContainerLaunch implements Callable { private final Container container; private final Configuration conf; private final Context context; + private final ContainerManagerImpl containerManager; private volatile AtomicBoolean shouldLaunchContainer = new AtomicBoolean(false); private volatile AtomicBoolean completed = new AtomicBoolean(false); @@ -101,7 +105,8 @@ public class ContainerLaunch implements Callable { public ContainerLaunch(Context context, Configuration configuration, Dispatcher dispatcher, ContainerExecutor exec, Application app, - Container container, LocalDirsHandlerService dirsHandler) { + Container container, LocalDirsHandlerService dirsHandler, + ContainerManagerImpl containerManager) { this.context = context; this.conf = configuration; this.app = app; @@ -109,6 +114,7 @@ public class ContainerLaunch implements Callable { this.container = container; this.dispatcher = dispatcher; this.dirsHandler = dirsHandler; + this.containerManager = containerManager; this.sleepDelayBeforeSigKill = conf.getLong(YarnConfiguration.NM_SLEEP_DELAY_BEFORE_SIGKILL_MS, YarnConfiguration.DEFAULT_NM_SLEEP_DELAY_BEFORE_SIGKILL_MS); @@ -227,7 +233,6 @@ public class ContainerLaunch implements Callable { ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME, new Path(containerWorkDir, FINAL_CONTAINER_TOKENS_FILE).toUri().getPath()); - // Sanitize the container's environment sanitizeEnv(environment, containerWorkDir, appDirs, containerLogDirs, localResources); @@ -680,6 +685,12 @@ public class ContainerLaunch implements Callable { environment.put(Environment.CLASSPATH.name(), classPathJar); } } + // put AuxiliaryService data to environment + for (Map.Entry meta : containerManager + .getAuxServiceMetaData().entrySet()) { + 
AuxiliaryServiceHelper.setServiceDataIntoEnv( + meta.getKey(), meta.getValue(), environment); + } } static void writeLaunchEnv(OutputStream out, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java index 643b29052b4..33e3c1c06de 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java @@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.Context; import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.application.Application; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; @@ -65,6 +66,7 @@ public class ContainersLauncher extends AbstractService private final Context context; private final ContainerExecutor exec; private final Dispatcher dispatcher; + private final ContainerManagerImpl containerManager; private LocalDirsHandlerService dirsHandler; @VisibleForTesting @@ -89,12 +91,14 @@ public class ContainersLauncher extends AbstractService public ContainersLauncher(Context context, Dispatcher dispatcher, - ContainerExecutor exec, LocalDirsHandlerService dirsHandler) { + ContainerExecutor exec, LocalDirsHandlerService dirsHandler, + ContainerManagerImpl containerManager) { super("containers-launcher"); this.exec = exec; this.context = context; this.dispatcher = dispatcher; this.dirsHandler = dirsHandler; + this.containerManager = containerManager; } @Override @@ -128,7 +132,7 @@ public class ContainersLauncher extends AbstractService ContainerLaunch launch = new ContainerLaunch(context, getConfig(), dispatcher, exec, app, - event.getContainer(), dirsHandler); + event.getContainer(), dirsHandler, containerManager); running.put(containerId, new RunningContainer(containerLauncher.submit(launch), launch)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java index a2fd96c041e..e6f0db21120 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/DummyContainerManager.java @@ -145,7 +145,7 @@ public class DummyContainerManager 
extends ContainerManagerImpl { protected ContainersLauncher createContainersLauncher(Context context, ContainerExecutor exec) { return new ContainersLauncher(context, super.dispatcher, exec, - super.dirsHandler) { + super.dirsHandler, this) { @Override public void handle(ContainersLauncherEvent event) { Container container = event.getContainer(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java index b33a58769c2..b02054cef67 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/BaseContainerManagerTest.java @@ -20,8 +20,11 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; import java.io.File; import java.io.IOException; +import java.nio.ByteBuffer; import java.util.ArrayList; +import java.util.HashMap; import java.util.List; +import java.util.Map; import junit.framework.Assert; @@ -211,6 +214,16 @@ public abstract class BaseContainerManagerTest { NMTokenIdentifier nmTokenIdentifier) throws InvalidToken { // Do nothing } + + @Override + public Map getAuxServiceMetaData() { + Map serviceData = new HashMap(); + serviceData.put("AuxService1", + ByteBuffer.wrap("AuxServiceMetaData1".getBytes())); + serviceData.put("AuxService2", + ByteBuffer.wrap("AuxServiceMetaData2".getBytes())); + return serviceData; + } }; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java index fc1408b67b8..14d445f5051 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java @@ -650,7 +650,7 @@ public class TestContainer { Context context = mock(Context.class); when(context.getApplications()).thenReturn( new ConcurrentHashMap()); - launcher = new ContainersLauncher(context, dispatcher, null, null); + launcher = new ContainersLauncher(context, dispatcher, null, null, null); // create a mock ExecutorService, which will not really launch // ContainerLaunch at all. 
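As an aside to this patch: the round trip performed by the new AuxiliaryServiceHelper (Base64-encode a service's ByteBuffer into an NM_AUX_SERVICE_<name> environment entry, decode it again inside the container) can be reproduced stand-alone with java.util.Base64. The service name and payload below are made up for the example; only the NM_AUX_SERVICE_ prefix and the Base64 scheme come from the helper shown above.

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;
import java.util.Base64;
import java.util.HashMap;
import java.util.Map;

public class AuxServiceEnvRoundTrip {
  static final String PREFIX = "NM_AUX_SERVICE_";   // same prefix the helper uses

  public static void main(String[] args) {
    Map<String, String> env = new HashMap<>();
    ByteBuffer meta = ByteBuffer.wrap("shuffle-token".getBytes(StandardCharsets.UTF_8));

    // NodeManager side: encode the aux-service data into the container environment.
    env.put(PREFIX + "mapreduce_shuffle",
        Base64.getEncoder().encodeToString(meta.array()));

    // Container side: read the service data back out of the environment.
    byte[] decoded = Base64.getDecoder().decode(env.get(PREFIX + "mapreduce_shuffle"));
    System.out.println(new String(decoded, StandardCharsets.UTF_8));   // prints shuffle-token
  }
}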
launcher.containerLauncher = mock(ExecutorService.class); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java index 9842ffc28b0..0a0a459bbee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java @@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher; import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; +import static org.mockito.Mockito.spy; import java.io.BufferedReader; import java.io.File; @@ -28,6 +29,7 @@ import java.io.FileOutputStream; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -37,6 +39,7 @@ import java.util.Map; import junit.framework.Assert; +import org.apache.commons.codec.binary.Base64; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -70,11 +73,13 @@ import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode; import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor; import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerExitEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer; import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin; import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin; @@ -381,6 +386,12 @@ public class TestContainerLaunch extends BaseContainerManagerTest { + processStartFile); fileWriter.println("@echo " + Environment.HOME.$() + ">> " + processStartFile); + for (String serviceName : containerManager.getAuxServiceMetaData() + .keySet()) { + fileWriter.println("@echo" + AuxiliaryServiceHelper.NM_AUX_SERVICE + + serviceName + " >> " + + processStartFile); + } fileWriter.println("@echo " + cId + ">> " + processStartFile); fileWriter.println("@ping -n 100 127.0.0.1 >nul"); } else { @@ -403,6 +414,12 @@ public class TestContainerLaunch extends BaseContainerManagerTest { + processStartFile); fileWriter.write("\necho $" + Environment.HOME.name() + " >> " + processStartFile); + for (String serviceName : containerManager.getAuxServiceMetaData() + .keySet()) 
{ + fileWriter.write("\necho $" + AuxiliaryServiceHelper.NM_AUX_SERVICE + + serviceName + " >> " + + processStartFile); + } fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nexec sleep 100"); } @@ -487,6 +504,12 @@ public class TestContainerLaunch extends BaseContainerManagerTest { YarnConfiguration.DEFAULT_NM_USER_HOME_DIR), reader.readLine()); + for (String serviceName : containerManager.getAuxServiceMetaData().keySet()) { + Assert.assertEquals( + containerManager.getAuxServiceMetaData().get(serviceName), + ByteBuffer.wrap(Base64.decodeBase64(reader.readLine().getBytes()))); + } + Assert.assertEquals(cId.toString(), containerLaunchContext .getEnvironment().get(Environment.CONTAINER_ID.name())); Assert.assertEquals(context.getNodeId().getHost(), containerLaunchContext @@ -557,6 +580,16 @@ public class TestContainerLaunch extends BaseContainerManagerTest { DefaultContainerExecutor.containerIsAlive(pid)); } + @Test (timeout = 5000) + public void testAuxiliaryServiceHelper() throws Exception { + Map env = new HashMap(); + String serviceName = "testAuxiliaryService"; + ByteBuffer bb = ByteBuffer.wrap("testAuxiliaryService".getBytes()); + AuxiliaryServiceHelper.setServiceDataIntoEnv(serviceName, bb, env); + Assert.assertEquals(bb, + AuxiliaryServiceHelper.getServiceDataFromEnv(serviceName, env)); + } + @Test public void testDelayedKill() throws Exception { containerManager.start(); @@ -703,7 +736,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest { }; when(dispatcher.getEventHandler()).thenReturn(eventHandler); ContainerLaunch launch = new ContainerLaunch(context, new Configuration(), - dispatcher, exec, null, container, dirsHandler); + dispatcher, exec, null, container, dirsHandler, containerManager); launch.call(); } From 5540d77e2f8cfcaa5db3a5bacac7c1737fd48999 Mon Sep 17 00:00:00 2001 From: Jason Darrell Lowe Date: Wed, 4 Sep 2013 22:23:40 +0000 Subject: [PATCH 117/153] MAPREDUCE-5475. MRClientService does not verify ACLs properly. Contributed by Jason Lowe git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520156 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 + .../v2/app/client/MRClientService.java | 64 +++++++++----- .../mapreduce/v2/app/TestMRClientService.java | 85 +++++++++++++++++++ 3 files changed, 131 insertions(+), 22 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 1cc5e1b333c..e3e0f25e83b 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -251,6 +251,8 @@ Release 2.1.1-beta - UNRELEASED commands to reboot, so that client can continue to track the overall job. (Jian He via vinodkv) + MAPREDUCE-5475. MRClientService does not verify ACLs properly (jlowe) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES @@ -1329,6 +1331,8 @@ Release 0.23.10 - UNRELEASED MAPREDUCE-5001. LocalJobRunner has race condition resulting in job failures (Sandy Ryza via jlowe) + MAPREDUCE-5475. 
MRClientService does not verify ACLs properly (jlowe) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 4bb39696e1e..d36bf62fdf0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -28,6 +28,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.ipc.Server; +import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; @@ -78,6 +79,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType; import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider; import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -175,16 +178,22 @@ public class MRClientService extends AbstractService return getBindAddress(); } - private Job verifyAndGetJob(JobId jobID, - boolean modifyAccess) throws IOException { + private Job verifyAndGetJob(JobId jobID, + JobACL accessType) throws IOException { Job job = appContext.getJob(jobID); + UserGroupInformation ugi = UserGroupInformation.getCurrentUser(); + if (!job.checkAccess(ugi, accessType)) { + throw new AccessControlException("User " + ugi.getShortUserName() + + " cannot perform operation " + accessType.name() + " on " + + jobID); + } return job; } private Task verifyAndGetTask(TaskId taskID, - boolean modifyAccess) throws IOException { + JobACL accessType) throws IOException { Task task = verifyAndGetJob(taskID.getJobId(), - modifyAccess).getTask(taskID); + accessType).getTask(taskID); if (task == null) { throw new IOException("Unknown Task " + taskID); } @@ -192,9 +201,9 @@ public class MRClientService extends AbstractService } private TaskAttempt verifyAndGetAttempt(TaskAttemptId attemptID, - boolean modifyAccess) throws IOException { + JobACL accessType) throws IOException { TaskAttempt attempt = verifyAndGetTask(attemptID.getTaskId(), - modifyAccess).getAttempt(attemptID); + accessType).getAttempt(attemptID); if (attempt == null) { throw new IOException("Unknown TaskAttempt " + attemptID); } @@ -205,7 +214,7 @@ public class MRClientService extends AbstractService public GetCountersResponse getCounters(GetCountersRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetCountersResponse response = recordFactory.newRecordInstance(GetCountersResponse.class); response.setCounters(TypeConverter.toYarn(job.getAllCounters())); @@ -216,7 +225,7 @@ 
public class MRClientService extends AbstractService public GetJobReportResponse getJobReport(GetJobReportRequest request) throws IOException { JobId jobId = request.getJobId(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetJobReportResponse response = recordFactory.newRecordInstance(GetJobReportResponse.class); if (job != null) { @@ -235,7 +244,7 @@ public class MRClientService extends AbstractService GetTaskAttemptReportResponse response = recordFactory.newRecordInstance(GetTaskAttemptReportResponse.class); response.setTaskAttemptReport( - verifyAndGetAttempt(taskAttemptId, false).getReport()); + verifyAndGetAttempt(taskAttemptId, JobACL.VIEW_JOB).getReport()); return response; } @@ -245,7 +254,8 @@ public class MRClientService extends AbstractService TaskId taskId = request.getTaskId(); GetTaskReportResponse response = recordFactory.newRecordInstance(GetTaskReportResponse.class); - response.setTaskReport(verifyAndGetTask(taskId, false).getReport()); + response.setTaskReport( + verifyAndGetTask(taskId, JobACL.VIEW_JOB).getReport()); return response; } @@ -256,7 +266,7 @@ public class MRClientService extends AbstractService JobId jobId = request.getJobId(); int fromEventId = request.getFromEventId(); int maxEvents = request.getMaxEvents(); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); GetTaskAttemptCompletionEventsResponse response = recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsResponse.class); @@ -270,9 +280,11 @@ public class MRClientService extends AbstractService public KillJobResponse killJob(KillJobRequest request) throws IOException { JobId jobId = request.getJobId(); - String message = "Kill Job received from client " + jobId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill job " + jobId + " received from " + callerUGI + + " at " + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetJob(jobId, true); + verifyAndGetJob(jobId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new JobDiagnosticsUpdateEvent(jobId, message)); appContext.getEventHandler().handle( @@ -287,9 +299,11 @@ public class MRClientService extends AbstractService public KillTaskResponse killTask(KillTaskRequest request) throws IOException { TaskId taskId = request.getTaskId(); - String message = "Kill task received from client " + taskId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill task " + taskId + " received from " + callerUGI + + " at " + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetTask(taskId, true); + verifyAndGetTask(taskId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskEvent(taskId, TaskEventType.T_KILL)); KillTaskResponse response = @@ -302,9 +316,12 @@ public class MRClientService extends AbstractService public KillTaskAttemptResponse killTaskAttempt( KillTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - String message = "Kill task attempt received from client " + taskAttemptId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Kill task attempt " + taskAttemptId + + " received from " + callerUGI + " at " + + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetAttempt(taskAttemptId, true); + verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new 
TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -322,8 +339,8 @@ public class MRClientService extends AbstractService GetDiagnosticsResponse response = recordFactory.newRecordInstance(GetDiagnosticsResponse.class); - response.addAllDiagnostics( - verifyAndGetAttempt(taskAttemptId, false).getDiagnostics()); + response.addAllDiagnostics(verifyAndGetAttempt(taskAttemptId, + JobACL.VIEW_JOB).getDiagnostics()); return response; } @@ -332,9 +349,12 @@ public class MRClientService extends AbstractService public FailTaskAttemptResponse failTaskAttempt( FailTaskAttemptRequest request) throws IOException { TaskAttemptId taskAttemptId = request.getTaskAttemptId(); - String message = "Fail task attempt received from client " + taskAttemptId; + UserGroupInformation callerUGI = UserGroupInformation.getCurrentUser(); + String message = "Fail task attempt " + taskAttemptId + + " received from " + callerUGI + " at " + + Server.getRemoteAddress(); LOG.info(message); - verifyAndGetAttempt(taskAttemptId, true); + verifyAndGetAttempt(taskAttemptId, JobACL.MODIFY_JOB); appContext.getEventHandler().handle( new TaskAttemptDiagnosticsUpdateEvent(taskAttemptId, message)); appContext.getEventHandler().handle( @@ -356,7 +376,7 @@ public class MRClientService extends AbstractService GetTaskReportsResponse response = recordFactory.newRecordInstance(GetTaskReportsResponse.class); - Job job = verifyAndGetJob(jobId, false); + Job job = verifyAndGetJob(jobId, JobACL.VIEW_JOB); Collection tasks = job.getTasks(taskType).values(); LOG.info("Getting task report for " + taskType + " " + jobId + ". Report-size will be " + tasks.size()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java index 34b8dc76354..b17b8ce7adc 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java @@ -18,13 +18,20 @@ package org.apache.hadoop.mapreduce.v2.app; +import static org.junit.Assert.fail; + +import java.security.PrivilegedExceptionAction; import java.util.Iterator; import java.util.List; import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.JobACL; +import org.apache.hadoop.mapreduce.MRConfig; +import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest; @@ -32,6 +39,9 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompleti import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest; +import 
org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillJobRequest; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskAttemptRequest; +import org.apache.hadoop.mapreduce.v2.api.protocolrecords.KillTaskRequest; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobReport; import org.apache.hadoop.mapreduce.v2.api.records.JobState; @@ -51,6 +61,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent; import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus; +import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -169,6 +181,79 @@ public class TestMRClientService { app.waitForState(job, JobState.SUCCEEDED); } + @Test + public void testViewAclOnlyCannotModify() throws Exception { + final MRAppWithClientService app = new MRAppWithClientService(1, 0, false); + final Configuration conf = new Configuration(); + conf.setBoolean(MRConfig.MR_ACLS_ENABLED, true); + conf.set(MRJobConfig.JOB_ACL_VIEW_JOB, "viewonlyuser"); + Job job = app.submit(conf); + app.waitForState(job, JobState.RUNNING); + Assert.assertEquals("Num tasks not correct", 1, job.getTasks().size()); + Iterator it = job.getTasks().values().iterator(); + Task task = it.next(); + app.waitForState(task, TaskState.RUNNING); + TaskAttempt attempt = task.getAttempts().values().iterator().next(); + app.waitForState(attempt, TaskAttemptState.RUNNING); + + UserGroupInformation viewOnlyUser = + UserGroupInformation.createUserForTesting( + "viewonlyuser", new String[] {}); + Assert.assertTrue("viewonlyuser cannot view job", + job.checkAccess(viewOnlyUser, JobACL.VIEW_JOB)); + Assert.assertFalse("viewonlyuser can modify job", + job.checkAccess(viewOnlyUser, JobACL.MODIFY_JOB)); + MRClientProtocol client = viewOnlyUser.doAs( + new PrivilegedExceptionAction() { + @Override + public MRClientProtocol run() throws Exception { + YarnRPC rpc = YarnRPC.create(conf); + return (MRClientProtocol) rpc.getProxy(MRClientProtocol.class, + app.clientService.getBindAddress(), conf); + } + }); + + KillJobRequest killJobRequest = recordFactory.newRecordInstance( + KillJobRequest.class); + killJobRequest.setJobId(app.getJobId()); + try { + client.killJob(killJobRequest); + fail("viewonlyuser killed job"); + } catch (AccessControlException e) { + // pass + } + + KillTaskRequest killTaskRequest = recordFactory.newRecordInstance( + KillTaskRequest.class); + killTaskRequest.setTaskId(task.getID()); + try { + client.killTask(killTaskRequest); + fail("viewonlyuser killed task"); + } catch (AccessControlException e) { + // pass + } + + KillTaskAttemptRequest killTaskAttemptRequest = + recordFactory.newRecordInstance(KillTaskAttemptRequest.class); + killTaskAttemptRequest.setTaskAttemptId(attempt.getID()); + try { + client.killTaskAttempt(killTaskAttemptRequest); + fail("viewonlyuser killed task attempt"); + } catch (AccessControlException e) { + // pass + } + + FailTaskAttemptRequest failTaskAttemptRequest = + recordFactory.newRecordInstance(FailTaskAttemptRequest.class); + failTaskAttemptRequest.setTaskAttemptId(attempt.getID()); + try { + 
client.failTaskAttempt(failTaskAttemptRequest); + fail("viewonlyuser killed task attempt"); + } catch (AccessControlException e) { + // pass + } + } + private void verifyJobReport(JobReport jr) { Assert.assertNotNull("JobReport is null", jr); List amInfos = jr.getAMInfos(); From 1e513bfc68c8de2976e3340cb83b6763c5d16813 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Thu, 5 Sep 2013 01:20:03 +0000 Subject: [PATCH 118/153] YARN-957. Fixed a bug in CapacityScheduler because of which requests that need more than a node's total capability were incorrectly allocated on that node causing apps to hang. Contributed by Omkar Vinit Joshi. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520187 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 + .../scheduler/SchedulerNode.java | 5 + .../scheduler/capacity/LeafQueue.java | 8 +- .../common/fica/FiCaSchedulerNode.java | 9 ++ .../scheduler/fair/FSSchedulerNode.java | 9 ++ .../yarn/server/resourcemanager/MockRM.java | 8 ++ .../capacity/TestContainerAllocation.java | 109 ++++++++++++++++++ 7 files changed, 151 insertions(+), 1 deletion(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 0e4a5f23eeb..f3cd1c30726 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -152,6 +152,10 @@ Release 2.1.1-beta - UNRELEASED YARN-1077. Fixed TestContainerLaunch test failure on Windows. (Chuan Liu via vinodkv) + YARN-957. Fixed a bug in CapacityScheduler because of which requests that + need more than a node's total capability were incorrectly allocated on that + node causing apps to hang. (Omkar Vinit Joshi via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index 8a80bf8cf9a..2974b9dc05a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -67,4 +67,9 @@ public abstract class SchedulerNode { */ public abstract int getNumContainers(); + /** + * Get total resources on the node. + * @return total resources on the node. 
+ */ + public abstract Resource getTotalResource(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index 41b3f5e3037..8624ec0e87d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -1308,9 +1308,15 @@ public class LeafQueue implements CSQueue { + " request=" + request + " type=" + type); } Resource capability = request.getCapability(); - Resource available = node.getAvailableResource(); + Resource totalResource = node.getTotalResource(); + if (!Resources.fitsIn(capability, totalResource)) { + LOG.warn("Node : " + node.getNodeID() + + " does not have sufficient resource for request : " + request + + " node total capability : " + node.getTotalResource()); + return Resources.none(); + } assert Resources.greaterThan( resourceCalculator, clusterResource, available, Resources.none()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java index 7a306ec4281..400b3153dcf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/common/fica/FiCaSchedulerNode.java @@ -49,6 +49,7 @@ public class FiCaSchedulerNode extends SchedulerNode { private Resource availableResource = recordFactory.newRecordInstance(Resource.class); private Resource usedResource = recordFactory.newRecordInstance(Resource.class); + private Resource totalResourceCapability; private volatile int numContainers; @@ -65,6 +66,9 @@ public class FiCaSchedulerNode extends SchedulerNode { this.rmNode = node; this.availableResource.setMemory(node.getTotalCapability().getMemory()); this.availableResource.setVirtualCores(node.getTotalCapability().getVirtualCores()); + totalResourceCapability = + Resource.newInstance(node.getTotalCapability().getMemory(), node + .getTotalCapability().getVirtualCores()); if (usePortForNodeName) { nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort(); } else { @@ -126,6 +130,11 @@ public class FiCaSchedulerNode extends SchedulerNode { return this.usedResource; } + @Override + public Resource getTotalResource() { + return this.totalResourceCapability; + } + private synchronized boolean isValidContainer(Container c) { if (launchedContainers.containsKey(c.getId())) return true; diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java index bd29f821bb4..d84547a3ffb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSSchedulerNode.java @@ -52,6 +52,7 @@ public class FSSchedulerNode extends SchedulerNode { private Resource availableResource; private Resource usedResource = recordFactory.newRecordInstance(Resource.class); + private Resource totalResourceCapability; private volatile int numContainers; @@ -68,6 +69,9 @@ public class FSSchedulerNode extends SchedulerNode { public FSSchedulerNode(RMNode node, boolean usePortForNodeName) { this.rmNode = node; this.availableResource = Resources.clone(node.getTotalCapability()); + totalResourceCapability = + Resource.newInstance(node.getTotalCapability().getMemory(), node + .getTotalCapability().getVirtualCores()); if (usePortForNodeName) { nodeName = rmNode.getHostName() + ":" + node.getNodeID().getPort(); } else { @@ -173,6 +177,11 @@ public class FSSchedulerNode extends SchedulerNode { Resources.subtractFrom(usedResource, resource); } + @Override + public Resource getTotalResource() { + return this.totalResourceCapability; + } + private synchronized void deductAvailableResource(Resource resource) { if (resource == null) { LOG.error("Invalid deduction of null resource for " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 3e2a8906a98..0f6f8a1fd7c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -232,6 +232,14 @@ public class MockRM extends ResourceManager { return nm; } + public MockNM registerNode(String nodeIdStr, int memory, int vCores) + throws Exception { + MockNM nm = + new MockNM(nodeIdStr, memory, vCores, getResourceTrackerService()); + nm.registerNode(); + return nm; + } + public void sendNodeStarted(MockNM nm) throws Exception { RMNodeImpl node = (RMNodeImpl) getRMContext().getRMNodes().get( nm.getNodeId()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java new file mode 100644 index 00000000000..b877fbbf98f --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -0,0 +1,109 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; + +import junit.framework.Assert; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.MockAM; +import org.apache.hadoop.yarn.server.resourcemanager.MockNM; +import org.apache.hadoop.yarn.server.resourcemanager.MockRM; +import org.apache.hadoop.yarn.server.resourcemanager.TestFifoScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.junit.Test; + + +public class TestContainerAllocation { + + private static final Log LOG = LogFactory.getLog(TestFifoScheduler.class); + + private final int GB = 1024; + + @Test(timeout = 3000000) + public void testExcessReservationThanNodeManagerCapacity() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + MockRM rm = new MockRM(conf); + rm.start(); + + // Register node1 + MockNM nm1 = rm.registerNode("127.0.0.1:1234", 2 * GB, 4); + MockNM nm2 = rm.registerNode("127.0.0.1:2234", 3 * GB, 4); + + nm1.nodeHeartbeat(true); + nm2.nodeHeartbeat(true); + + // wait.. + int waitCount = 20; + int size = rm.getRMContext().getRMNodes().size(); + while ((size = rm.getRMContext().getRMNodes().size()) != 2 + && waitCount-- > 0) { + LOG.info("Waiting for node managers to register : " + size); + Thread.sleep(100); + } + Assert.assertEquals(2, rm.getRMContext().getRMNodes().size()); + // Submit an application + RMApp app1 = rm.submitApp(128); + + // kick the scheduling + nm1.nodeHeartbeat(true); + RMAppAttempt attempt1 = app1.getCurrentAppAttempt(); + MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId()); + am1.registerAppAttempt(); + + LOG.info("sending container requests "); + am1.addRequests(new String[] {"*"}, 3 * GB, 1, 1); + AllocateResponse alloc1Response = am1.schedule(); // send the request + + // kick the scheduler + nm1.nodeHeartbeat(true); + int waitCounter = 20; + LOG.info("heartbeating nm1"); + while (alloc1Response.getAllocatedContainers().size() < 1 + && waitCounter-- > 0) { + LOG.info("Waiting for containers to be created for app 1..."); + Thread.sleep(500); + alloc1Response = am1.schedule(); + } + LOG.info("received container : " + + alloc1Response.getAllocatedContainers().size()); + + // No container should be allocated. 
+ // Internally it should not been reserved. + Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 0); + + LOG.info("heartbeating nm2"); + waitCounter = 20; + nm2.nodeHeartbeat(true); + while (alloc1Response.getAllocatedContainers().size() < 1 + && waitCounter-- > 0) { + LOG.info("Waiting for containers to be created for app 1..."); + Thread.sleep(500); + alloc1Response = am1.schedule(); + } + LOG.info("received container : " + + alloc1Response.getAllocatedContainers().size()); + Assert.assertTrue(alloc1Response.getAllocatedContainers().size() == 1); + + rm.stop(); + } +} \ No newline at end of file From 4f4680111143ff50b78c34792ebd452c7fbec504 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 5 Sep 2013 01:46:14 +0000 Subject: [PATCH 119/153] HADOOP-9915. o.a.h.fs.Stat support on Mac OS X (Contributed by Binglin Chang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520190 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 3 +++ .../main/java/org/apache/hadoop/fs/Stat.java | 4 ++-- .../java/org/apache/hadoop/fs/TestStat.java | 22 ++++++++++++++++++- 3 files changed, 26 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 7311d5a1afe..e47f300828d 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -105,6 +105,9 @@ Trunk (Unreleased) HADOOP-9833 move slf4j to version 1.7.5 (Kousuke Saruta via stevel) + HADOOP-9915. o.a.h.fs.Stat support on Mac OS X (Binglin Chang via Colin + Patrick McCabe) + BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java index 960f5cef3c3..c2ec63c6e70 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java @@ -80,7 +80,7 @@ public class Stat extends Shell { * @return */ public static boolean isAvailable() { - if (Shell.LINUX || Shell.FREEBSD) { + if (Shell.LINUX || Shell.FREEBSD || Shell.MAC) { return true; } return false; @@ -100,7 +100,7 @@ public class Stat extends Shell { if (Shell.LINUX) { return new String[] { "stat", derefFlag + "c", "%s,%F,%Y,%X,%a,%U,%G,%N", path.toString() }; - } else if (Shell.FREEBSD) { + } else if (Shell.FREEBSD || Shell.MAC) { return new String[] { "stat", derefFlag + "f", "%z,%HT,%m,%a,%Op,%Su,%Sg,`link' -> `%Y'", path.toString() }; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java index 506facf0c64..a46a5ced098 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java @@ -19,6 +19,7 @@ package org.apache.hadoop.fs; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; +import static org.junit.Assert.assertFalse; import static org.junit.Assert.fail; import java.io.BufferedReader; @@ -26,10 +27,11 @@ import java.io.FileNotFoundException; import java.io.StringReader; import org.apache.hadoop.conf.Configuration; +import org.junit.Assume; import org.junit.BeforeClass; import org.junit.Test; -public 
class TestStat { +public class TestStat extends FileSystemTestHelper { private static Stat stat; @@ -113,6 +115,7 @@ public class TestStat { @Test(timeout=10000) public void testStatFileNotFound() throws Exception { + Assume.assumeTrue(Stat.isAvailable()); try { stat.getFileStatus(); fail("Expected FileNotFoundException"); @@ -125,4 +128,21 @@ public class TestStat { public void testStatEnvironment() throws Exception { assertEquals(stat.getEnvironment("LANG"), "C"); } + + @Test(timeout=10000) + public void testStat() throws Exception { + Assume.assumeTrue(Stat.isAvailable()); + FileSystem fs = FileSystem.getLocal(new Configuration()); + Path testDir = new Path(getTestRootPath(fs), "teststat"); + fs.mkdirs(testDir); + Path sub1 = new Path(testDir, "sub1"); + Path sub2 = new Path(testDir, "sub2"); + fs.mkdirs(sub1); + fs.createSymlink(sub1, sub2, false); + FileStatus stat1 = new Stat(sub1, 4096l, false, fs).getFileStatus(); + FileStatus stat2 = new Stat(sub2, 0, false, fs).getFileStatus(); + assertTrue(stat1.isDirectory()); + assertFalse(stat2.isDirectory()); + fs.delete(testDir, true); + } } From ec135d560b46d166a443df4314b24de82afbdbc2 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Thu, 5 Sep 2013 02:00:51 +0000 Subject: [PATCH 120/153] CHANGES.txt: move HADOOP-9915 to branch-2.3 section git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520193 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e47f300828d..ac047c854d5 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -105,9 +105,6 @@ Trunk (Unreleased) HADOOP-9833 move slf4j to version 1.7.5 (Kousuke Saruta via stevel) - HADOOP-9915. o.a.h.fs.Stat support on Mac OS X (Binglin Chang via Colin - Patrick McCabe) - BUG FIXES HADOOP-9451. Fault single-layer config if node group topology is enabled. @@ -333,6 +330,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9889. Refresh the Krb5 configuration when creating a new kdc in Hadoop-MiniKDC (Wei Yan via Sandy Ryza) + HADOOP-9915. o.a.h.fs.Stat support on Mac OS X (Binglin Chang via Colin + Patrick McCabe) + OPTIMIZATIONS HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn) From e8f6f74025e7face1756fbce29a57509f7b06716 Mon Sep 17 00:00:00 2001 From: Todd Lipcon Date: Thu, 5 Sep 2013 14:41:25 +0000 Subject: [PATCH 121/153] HADOOP-9908. Fix NPE when versioninfo properties file is missing. Contributed by Todd Lipcon. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520333 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/CHANGES.txt | 2 ++ .../src/main/java/org/apache/hadoop/util/VersionInfo.java | 3 +++ 2 files changed, 5 insertions(+) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index ac047c854d5..c2beef64124 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -363,6 +363,8 @@ Release 2.3.0 - UNRELEASED HADOOP-9909. org.apache.hadoop.fs.Stat should permit other LANG. (Shinichi Yamashita via Andrew Wang) + HADOOP-9908. 
Fix NPE when versioninfo properties file is missing (todd) + Release 2.1.1-beta - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java index 0f08f15ffa4..1547577b864 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java @@ -48,6 +48,9 @@ public class VersionInfo { try { InputStream is = Thread.currentThread().getContextClassLoader() .getResourceAsStream(versionInfoFile); + if (is == null) { + throw new IOException("Resource not found"); + } info.load(is); } catch (IOException ex) { LogFactory.getLog(getClass()).warn("Could not read '" + From 8aea748ec37b8e66c222f704eacedb47d9c73cfd Mon Sep 17 00:00:00 2001 From: Aaron Myers Date: Thu, 5 Sep 2013 16:21:26 +0000 Subject: [PATCH 122/153] HDFS-5159. Secondary NameNode fails to checkpoint if error occurs downloading edits on first checkpoint. Contributed by Aaron T. Myers. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520363 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../server/namenode/SecondaryNameNode.java | 6 +- .../hdfs/server/namenode/TestCheckpoint.java | 59 +++++++++++++++++-- 3 files changed, 60 insertions(+), 8 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f723ad463b5..293de2d515d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -415,6 +415,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5140. Too many safemode monitor threads being created in the standby namenode causing it to fail with out of memory error. (jing9) + HDFS-5159. Secondary NameNode fails to checkpoint if error occurs + downloading edits on first checkpoint. (atm) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index 844c77f1cfa..d411d33c936 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -429,10 +429,8 @@ public class SecondaryNameNode implements Runnable { dstImage.getStorage().cTime = sig.cTime; // get fsimage - boolean downloadImage = true; if (sig.mostRecentCheckpointTxId == dstImage.getStorage().getMostRecentCheckpointTxId()) { - downloadImage = false; LOG.info("Image has not changed. Will not download image."); } else { LOG.info("Image has changed. Downloading updated image from NN."); @@ -448,7 +446,9 @@ public class SecondaryNameNode implements Runnable { nnHostPort, log, dstImage.getStorage()); } - return Boolean.valueOf(downloadImage); + // true if we haven't loaded all the transactions represented by the + // downloaded fsimage. 
+ return dstImage.getLastAppliedTxId() < sig.mostRecentCheckpointTxId; } }); return b.booleanValue(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java index 4676e8524b4..b49953dd472 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java @@ -39,7 +39,6 @@ import java.util.Collection; import java.util.List; import org.apache.commons.cli.ParseException; -import org.apache.commons.io.filefilter.FileFilterUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.commons.logging.impl.Log4JLogger; @@ -1224,7 +1223,6 @@ public class TestCheckpoint { } /* Test case to test CheckpointSignature */ - @SuppressWarnings("deprecation") @Test public void testCheckpointSignature() throws IOException { @@ -1562,12 +1560,65 @@ public class TestCheckpoint { Mockito.reset(faultInjector); } } + + /** + * Test that a fault while downloading edits the first time after the 2NN + * starts up does not prevent future checkpointing. + */ + @Test(timeout = 30000) + public void testEditFailureOnFirstCheckpoint() throws IOException { + Configuration conf = new HdfsConfiguration(); + SecondaryNameNode secondary = null; + MiniDFSCluster cluster = null; + FileSystem fs = null; + try { + cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes) + .build(); + cluster.waitActive(); + fs = cluster.getFileSystem(); + fs.mkdirs(new Path("test-file-1")); + + // Make sure the on-disk fsimage on the NN has txid > 0. + FSNamesystem fsns = cluster.getNamesystem(); + fsns.enterSafeMode(false); + fsns.saveNamespace(); + fsns.leaveSafeMode(); + + secondary = startSecondaryNameNode(conf); + + // Cause edit rename to fail during next checkpoint + Mockito.doThrow(new IOException("Injecting failure before edit rename")) + .when(faultInjector).beforeEditsRename(); + + try { + secondary.doCheckpoint(); + fail("Fault injection failed."); + } catch (IOException ioe) { + GenericTestUtils.assertExceptionContains( + "Injecting failure before edit rename", ioe); + } + Mockito.reset(faultInjector); + + // Next checkpoint should succeed + secondary.doCheckpoint(); + } finally { + if (secondary != null) { + secondary.shutdown(); + } + if (fs != null) { + fs.close(); + } + if (cluster != null) { + cluster.shutdown(); + } + Mockito.reset(faultInjector); + } + } /** * Test that the secondary namenode correctly deletes temporary edits * on startup. */ - @Test(timeout = 30000) public void testDeleteTemporaryEditsOnStartup() throws IOException { Configuration conf = new HdfsConfiguration(); @@ -1943,7 +1994,6 @@ public class TestCheckpoint { * Test that, if a storage directory is failed when a checkpoint occurs, * the non-failed storage directory receives the checkpoint. */ - @SuppressWarnings("deprecation") @Test public void testCheckpointWithFailedStorageDir() throws Exception { MiniDFSCluster cluster = null; @@ -2006,7 +2056,6 @@ public class TestCheckpoint { * should function correctly. 
* @throws Exception */ - @SuppressWarnings("deprecation") @Test public void testCheckpointWithSeparateDirsAfterNameFails() throws Exception { MiniDFSCluster cluster = null; From 5eb618ee1f90ccf901edb5d89be181fad1f67d7f Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 6 Sep 2013 03:06:41 +0000 Subject: [PATCH 123/153] HDFS-4491. Parallel testing HDFS. Contributed by Andrey Klochkov. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520479 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-common-project/hadoop-common/pom.xml | 4 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + hadoop-hdfs-project/hadoop-hdfs/pom.xml | 39 ++++++++++++ .../apache/hadoop/hdfs/HftpFileSystem.java | 12 ++-- .../apache/hadoop/hdfs/HsftpFileSystem.java | 4 +- .../hadoop/hdfs/web/WebHdfsFileSystem.java | 13 ++-- .../org/apache/hadoop/fs/TestFiRename.java | 5 +- .../hadoop/fs/TestResolveHdfsSymlink.java | 7 ++- .../hadoop/fs/TestUrlStreamHandler.java | 12 ++-- .../fs/loadGenerator/TestLoadGenerator.java | 11 ++-- .../apache/hadoop/hdfs/MiniDFSCluster.java | 29 ++++++--- .../hadoop/hdfs/TestClientReportBadBlock.java | 4 -- .../apache/hadoop/hdfs/TestDFSRollback.java | 3 - .../org/apache/hadoop/hdfs/TestDFSShell.java | 13 ++-- .../apache/hadoop/hdfs/TestDecommission.java | 3 +- .../hdfs/TestDistributedFileSystem.java | 5 ++ .../hadoop/hdfs/TestFSInputChecker.java | 3 +- .../hadoop/hdfs/TestFileAppendRestart.java | 3 +- .../hadoop/hdfs/TestFileCorruption.java | 3 +- .../hadoop/hdfs/TestHDFSServerPorts.java | 20 +++---- .../hadoop/hdfs/TestHftpURLTimeouts.java | 19 +++--- .../hadoop/hdfs/TestMiniDFSCluster.java | 20 ++----- .../apache/hadoop/hdfs/TestPersistBlocks.java | 3 +- .../hadoop/hdfs/qjournal/TestNNWithQJM.java | 3 +- .../hdfs/qjournal/server/TestJournalNode.java | 5 +- .../TestOverReplicatedBlocks.java | 1 - .../TestReplicationPolicy.java | 4 +- .../TestReplicationPolicyWithNodeGroup.java | 59 ++++++++++--------- .../server/datanode/TestBPOfferService.java | 4 +- .../hdfs/server/namenode/TestAllowFormat.java | 28 ++++----- .../hdfs/server/namenode/TestAuditLogs.java | 4 +- .../hdfs/server/namenode/TestBackupNode.java | 2 + .../hdfs/server/namenode/TestCheckpoint.java | 14 +++-- .../hdfs/server/namenode/TestClusterId.java | 3 +- .../hdfs/server/namenode/TestEditLog.java | 7 ++- .../namenode/TestEditLogFileOutputStream.java | 4 +- .../namenode/TestEditLogJournalFailures.java | 4 +- .../server/namenode/TestFSEditLogLoader.java | 4 +- .../hadoop/hdfs/server/namenode/TestFsck.java | 5 +- .../server/namenode/TestNameEditsConfigs.java | 5 +- .../server/namenode/TestNameNodeRecovery.java | 4 +- .../namenode/TestNameNodeResourceChecker.java | 15 ++--- .../hdfs/server/namenode/TestStartup.java | 4 +- .../server/namenode/TestStorageRestore.java | 3 +- .../server/namenode/TestTransferFsImage.java | 4 +- .../namenode/ha/TestBootstrapStandby.java | 4 +- .../ha/TestDFSZKFailoverController.java | 8 +-- .../server/namenode/ha/TestEditLogTailer.java | 4 +- .../namenode/ha/TestFailureToReadEdits.java | 4 +- .../hdfs/server/namenode/ha/TestHAFsck.java | 4 +- .../namenode/ha/TestStandbyCheckpoints.java | 4 +- .../TestOfflineEditsViewer.java | 3 +- .../TestDelimitedImageVisitor.java | 3 +- .../TestOfflineImageViewer.java | 4 +- .../hdfs/util/TestAtomicFileOutputStream.java | 6 +- .../hadoop/hdfs/util/TestMD5FileUtils.java | 6 +- .../hadoop/hdfs/web/TestWebHdfsTimeouts.java | 9 +-- hadoop-project/pom.xml | 22 +------ 58 files changed, 275 insertions(+), 229 deletions(-) diff --git 
a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index af5a7f4ee95..6f7fa0f0259 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -780,9 +780,7 @@ org.apache.maven.plugins maven-surefire-plugin - perthread - ${testsThreadCount} - classes + ${testsThreadCount} -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 293de2d515d..2015f2954f4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -267,6 +267,8 @@ Release 2.3.0 - UNRELEASED HDFS-5144. Document time unit to NameNodeMetrics. (Akira Ajisaka via suresh) + HDFS-4491. Parallel testing HDFS. (Andrey Klochkov via cnauroth) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 59abffa0f96..2160d83bac3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -700,5 +700,44 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + parallel-tests + + + + maven-antrun-plugin + + + create-parallel-tests-dirs + test-compile + + + + + + + + + + run + + + + + + org.apache.maven.plugins + maven-surefire-plugin + + ${testsThreadCount} + -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError -DminiClusterDedicatedDirs=true + + ${test.build.data}/${surefire.forkNumber} + ${hadoop.tmp.dir}/${surefire.forkNumber} + + + + + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java index 9d9cde4dc02..dd5e9c6daa0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java @@ -51,7 +51,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher; -import org.apache.hadoop.hdfs.web.URLUtils; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.net.NetUtils; @@ -86,6 +86,8 @@ public class HftpFileSystem extends FileSystem HttpURLConnection.setFollowRedirects(true); } + URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + public static final Text TOKEN_KIND = new Text("HFTP delegation"); protected UserGroupInformation ugi; @@ -331,8 +333,8 @@ public class HftpFileSystem extends FileSystem throws IOException { query = addDelegationTokenParam(query); final URL url = getNamenodeURL(path, query); - final HttpURLConnection connection = - (HttpURLConnection)URLUtils.openConnection(url); + final HttpURLConnection connection; + connection = (HttpURLConnection)connectionFactory.openConnection(url); connection.setRequestMethod("GET"); connection.connect(); return connection; @@ -352,12 +354,14 @@ public class HftpFileSystem extends FileSystem } static class RangeHeaderUrlOpener extends ByteRangeInputStream.URLOpener { + URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; + RangeHeaderUrlOpener(final URL url) { super(url); 
} protected HttpURLConnection openConnection() throws IOException { - return (HttpURLConnection)URLUtils.openConnection(url); + return (HttpURLConnection)connectionFactory.openConnection(url); } /** Use HTTP Range header for specifying offset. */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java index 438d56e52f0..6a3bdba593b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java @@ -40,7 +40,6 @@ import javax.net.ssl.X509TrustManager; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.web.URLUtils; import org.apache.hadoop.util.Time; /** @@ -154,7 +153,8 @@ public class HsftpFileSystem extends HftpFileSystem { query = addDelegationTokenParam(query); final URL url = new URL("https", nnUri.getHost(), nnUri.getPort(), path + '?' + query); - HttpsURLConnection conn = (HttpsURLConnection)URLUtils.openConnection(url); + HttpsURLConnection conn; + conn = (HttpsURLConnection)connectionFactory.openConnection(url); // bypass hostname verification conn.setHostnameVerifier(new DummyHostnameVerifier()); conn.setRequestMethod("GET"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java index 9dbb01b3942..8f1c5895f40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java @@ -119,13 +119,15 @@ public class WebHdfsFileSystem extends FileSystem /** SPNEGO authenticator */ private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator(); + /** Default connection factory may be overriden in tests to use smaller timeout values */ + URLConnectionFactory connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; /** Configures connections for AuthenticatedURL */ - private static final ConnectionConfigurator CONN_CONFIGURATOR = + private final ConnectionConfigurator CONN_CONFIGURATOR = new ConnectionConfigurator() { @Override public HttpURLConnection configure(HttpURLConnection conn) throws IOException { - URLUtils.setTimeouts(conn); + connectionFactory.setTimeouts(conn); return conn; } }; @@ -479,10 +481,9 @@ public class WebHdfsFileSystem extends FileSystem final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token(); conn = new AuthenticatedURL(AUTH, CONN_CONFIGURATOR).openConnection( url, authToken); - URLUtils.setTimeouts(conn); } else { LOG.debug("open URL connection"); - conn = (HttpURLConnection)URLUtils.openConnection(url); + conn = (HttpURLConnection)connectionFactory.openConnection(url); } } catch (AuthenticationException e) { throw new IOException(e); @@ -577,7 +578,7 @@ public class WebHdfsFileSystem extends FileSystem checkRetry = false; //Step 2) Submit another Http request with the URL from the Location header with data. 
- conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect)); + conn = (HttpURLConnection)connectionFactory.openConnection(new URL(redirect)); conn.setRequestProperty("Content-Type", MediaType.APPLICATION_OCTET_STREAM); conn.setChunkedStreamingMode(32 << 10); //32kB-chunk connect(); @@ -600,7 +601,7 @@ public class WebHdfsFileSystem extends FileSystem disconnect(); checkRetry = false; - conn = (HttpURLConnection)URLUtils.openConnection(new URL(redirect)); + conn = (HttpURLConnection)connectionFactory.openConnection(new URL(redirect)); connect(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java index 794c0571944..5ad39304da3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/fs/TestFiRename.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs; +import java.io.File; import java.io.IOException; import java.util.EnumSet; @@ -27,6 +28,7 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; +import org.apache.hadoop.test.PathUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -56,8 +58,7 @@ public class TestFiRename { private static String addChild = ""; private static byte[] data = { 0 }; - private static String TEST_ROOT_DIR = - System.getProperty("test.build.data", "/tmp") + "/test"; + private static String TEST_ROOT_DIR = PathUtils.getTestDirName(TestFiRename.class); private static Configuration CONF = new Configuration(); static { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java index c48759e1f5e..cfd4a8d418c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java @@ -18,6 +18,7 @@ package org.apache.hadoop.fs; +import java.io.File; import static org.junit.Assert.fail; import java.io.FileNotFoundException; @@ -36,6 +37,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.test.GenericTestUtils; import org.junit.AfterClass; import org.junit.Assert; @@ -48,6 +50,7 @@ import org.junit.Test; * underlying file system as Hdfs. 
*/ public class TestResolveHdfsSymlink { + private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestResolveHdfsSymlink.class); private static MiniDFSCluster cluster = null; @BeforeClass @@ -80,12 +83,12 @@ public class TestResolveHdfsSymlink { .getUri()); Path alphaLocalPath = new Path(fcLocal.getDefaultFileSystem().getUri() - .toString(), "/tmp/alpha"); + .toString(), new File(TEST_ROOT_DIR, "alpha").getAbsolutePath()); DFSTestUtil.createFile(FileSystem.getLocal(conf), alphaLocalPath, 16, (short) 1, 2); Path linkTarget = new Path(fcLocal.getDefaultFileSystem().getUri() - .toString(), "/tmp"); + .toString(), TEST_ROOT_DIR.getAbsolutePath()); Path hdfsLink = new Path(fcHdfs.getDefaultFileSystem().getUri().toString(), "/tmp/link"); fcHdfs.createSymlink(linkTarget, hdfsLink, true); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java index d28736cffae..845eb6314ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestUrlStreamHandler.java @@ -31,6 +31,7 @@ import java.net.URL; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -38,6 +39,8 @@ import org.junit.Test; */ public class TestUrlStreamHandler { + private static File TEST_ROOT_DIR = PathUtils.getTestDir(TestUrlStreamHandler.class); + /** * Test opening and reading from an InputStream through a hdfs:// URL. *
    @@ -111,13 +114,12 @@ public class TestUrlStreamHandler { Configuration conf = new HdfsConfiguration(); // Locate the test temporary directory. - File tmpDir = new File(conf.get("hadoop.tmp.dir")); - if (!tmpDir.exists()) { - if (!tmpDir.mkdirs()) - throw new IOException("Cannot create temporary directory: " + tmpDir); + if (!TEST_ROOT_DIR.exists()) { + if (!TEST_ROOT_DIR.mkdirs()) + throw new IOException("Cannot create temporary directory: " + TEST_ROOT_DIR); } - File tmpFile = new File(tmpDir, "thefile"); + File tmpFile = new File(TEST_ROOT_DIR, "thefile"); URI uri = tmpFile.toURI(); FileSystem fs = FileSystem.get(uri, conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java index e9e14ce8b11..6c7bac31ea8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java @@ -29,6 +29,7 @@ import org.apache.hadoop.conf.Configured; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Time; import org.apache.hadoop.util.Tool; import org.apache.hadoop.util.ToolRunner; @@ -39,8 +40,7 @@ import org.junit.Test; public class TestLoadGenerator extends Configured implements Tool { private static final Configuration CONF = new HdfsConfiguration(); private static final int DEFAULT_BLOCK_SIZE = 10; - private static final String OUT_DIR = - System.getProperty("test.build.data","build/test/data"); + private static final File OUT_DIR = PathUtils.getTestDir(TestLoadGenerator.class); private static final File DIR_STRUCTURE_FILE = new File(OUT_DIR, StructureGenerator.DIR_STRUCTURE_FILE_NAME); private static final File FILE_STRUCTURE_FILE = @@ -65,7 +65,7 @@ public class TestLoadGenerator extends Configured implements Tool { StructureGenerator sg = new StructureGenerator(); String[] args = new String[]{"-maxDepth", "2", "-minWidth", "1", "-maxWidth", "2", "-numOfFiles", "2", - "-avgFileSize", "1", "-outDir", OUT_DIR, "-seed", "1"}; + "-avgFileSize", "1", "-outDir", OUT_DIR.getAbsolutePath(), "-seed", "1"}; final int MAX_DEPTH = 1; final int MIN_WIDTH = 3; @@ -133,8 +133,7 @@ public class TestLoadGenerator extends Configured implements Tool { public void testLoadGenerator() throws Exception { final String TEST_SPACE_ROOT = "/test"; - final String SCRIPT_TEST_DIR = new File(System.getProperty("test.build.data", - "/tmp")).getAbsolutePath(); + final String SCRIPT_TEST_DIR = OUT_DIR.getAbsolutePath(); String script = SCRIPT_TEST_DIR + "/" + "loadgenscript"; String script2 = SCRIPT_TEST_DIR + "/" + "loadgenscript2"; File scriptFile1 = new File(script); @@ -156,7 +155,7 @@ public class TestLoadGenerator extends Configured implements Tool { try { DataGenerator dg = new DataGenerator(); dg.setConf(CONF); - String [] args = new String[] {"-inDir", OUT_DIR, "-root", TEST_SPACE_ROOT}; + String [] args = new String[] {"-inDir", OUT_DIR.getAbsolutePath(), "-root", TEST_SPACE_ROOT}; assertEquals(0, dg.run(args)); final int READ_PROBABILITY = 1; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java 
index 7090d498444..222a9b77ef5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -1407,6 +1407,13 @@ public class MiniDFSCluster { * Shutdown all the nodes in the cluster. */ public void shutdown() { + shutdown(false); + } + + /** + * Shutdown all the nodes in the cluster. + */ + public void shutdown(boolean deleteDfsDir) { LOG.info("Shutting down the Mini HDFS Cluster"); if (checkExitOnShutdown) { if (ExitUtil.terminateCalled()) { @@ -1426,6 +1433,11 @@ public class MiniDFSCluster { nameNode = null; } } + if (deleteDfsDir) { + base_dir.delete(); + } else { + base_dir.deleteOnExit(); + } } /** @@ -2118,7 +2130,7 @@ public class MiniDFSCluster { *
 *   /data/data<2*dnIndex + 1>
 *   /data/data<2*dnIndex + 2>
  • * - * + * * @param dnIndex datanode index (starts from 0) * @param dirIndex directory index (0 or 1). Index 0 provides access to the * first storage directory. Index 1 provides access to the second @@ -2149,7 +2161,7 @@ public class MiniDFSCluster { public static String getDNCurrentDir(File storageDir) { return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/"; } - + /** * Get directory corresponding to block pool directory in the datanode * @param storageDir the storage directory of a datanode. @@ -2255,7 +2267,7 @@ public class MiniDFSCluster { } return null; } - + /** * Get the block metadata file for a block from a given datanode * @@ -2343,14 +2355,17 @@ public class MiniDFSCluster { } else { if (checkDataNodeAddrConfig) { conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); } else { conf.set(DFS_DATANODE_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); } } + if (checkDataNodeAddrConfig) { + conf.setIfUnset(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.setIfUnset(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); + } else { + conf.set(DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); + conf.set(DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0"); + } } private void addToFile(String p, String address) throws IOException { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java index 51fab6653f3..74c763d5aa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestClientReportBadBlock.java @@ -71,10 +71,6 @@ public class TestClientReportBadBlock { @Before public void startUpCluster() throws IOException { - if (System.getProperty("test.build.data") == null) { // to allow test to be - // run outside of Ant - System.setProperty("test.build.data", "build/test/data"); - } // disable block scanner conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java index e54e2777c44..fbcce3946ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java @@ -20,9 +20,6 @@ package org.apache.hadoop.hdfs; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.DATA_NODE; import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType.NAME_NODE; import static org.junit.Assert.*; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.fail; import java.io.File; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java index e7d82688a9d..42873785f37 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSShell.java @@ -44,6 +44,7 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.io.compress.BZip2Codec; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.ToolRunner; @@ -61,9 +62,7 @@ public class TestDFSShell { private static final Log LOG = LogFactory.getLog(TestDFSShell.class); private static AtomicInteger counter = new AtomicInteger(); - static final String TEST_ROOT_DIR = - new Path(System.getProperty("test.build.data","/tmp")) - .toString().replace(' ', '+'); + static final String TEST_ROOT_DIR = PathUtils.getTestDirName(TestDFSShell.class); static Path writeFile(FileSystem fs, Path f) throws IOException { DataOutputStream out = fs.create(f); @@ -482,12 +481,11 @@ public class TestDFSShell { Configuration dstConf = new HdfsConfiguration(); MiniDFSCluster srcCluster = null; MiniDFSCluster dstCluster = null; - String bak = System.getProperty("test.build.data"); + File bak = new File(PathUtils.getTestDir(getClass()), "dfs_tmp_uri"); + bak.mkdirs(); try{ srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build(); - File nameDir = new File(new File(bak), "dfs_tmp_uri/"); - nameDir.mkdirs(); - System.setProperty("test.build.data", nameDir.toString()); + dstConf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, bak.getAbsolutePath()); dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build(); FileSystem srcFs = srcCluster.getFileSystem(); FileSystem dstFs = dstCluster.getFileSystem(); @@ -559,7 +557,6 @@ public class TestDFSShell { ret = ToolRunner.run(shell, argv); assertEquals("default works for rm/rmr", 0, ret); } finally { - System.setProperty("test.build.data", bak); if (null != srcCluster) { srcCluster.shutdown(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java index 05d22961dd0..9c2e038f3f1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDecommission.java @@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter; +import org.apache.hadoop.test.PathUtils; import org.junit.After; import org.junit.Assert; import org.junit.Before; @@ -75,7 +76,7 @@ public class TestDecommission { // Set up the hosts/exclude files. 
localFileSys = FileSystem.getLocal(conf); Path workingDir = localFileSys.getWorkingDirectory(); - Path dir = new Path(workingDir, System.getProperty("test.build.data", "target/test/data") + "/work-dir/decommission"); + Path dir = new Path(workingDir, PathUtils.getTestDirName(getClass()) + "/work-dir/decommission"); hostsFile = new Path(dir, "hosts"); excludeFile = new Path(dir, "exclude"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java index 9b4f3130d7f..c0093d21aa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDistributedFileSystem.java @@ -25,6 +25,8 @@ import static org.junit.Assert.fail; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; + +import java.io.File; import java.io.FileNotFoundException; import java.io.IOException; import java.net.URI; @@ -73,6 +75,9 @@ public class TestDistributedFileSystem { HdfsConfiguration conf; if (noXmlDefaults) { conf = new HdfsConfiguration(false); + String namenodeDir = new File(MiniDFSCluster.getBaseDirectory(), "name").getAbsolutePath(); + conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, namenodeDir); + conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, namenodeDir); } else { conf = new HdfsConfiguration(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java index 2f2c9a4fd6e..5fc567a2131 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFSInputChecker.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -260,7 +261,7 @@ public class TestFSInputChecker { // create a file and verify that checksum corruption results in // a checksum exception on LocalFS - String dir = System.getProperty("test.build.data", "."); + String dir = PathUtils.getTestDirName(getClass()); Path file = new Path(dir + "/corruption-test.dat"); Path crcFile = new Path(dir + "/.corruption-test.dat.crc"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java index d086c77a9bf..570b19f8d2e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppendRestart.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.namenode.NNStorage; import org.apache.hadoop.hdfs.util.Holder; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -145,7 +146,7 @@ public class TestFileAppendRestart { String tarFile = System.getProperty("test.cache.data", "build/test/cache") + "/" + HADOOP_23_BROKEN_APPEND_TGZ; - String testDir = 
System.getProperty("test.build.data", "build/test/data"); + String testDir = PathUtils.getTestDirName(getClass()); File dfsDir = new File(testDir, "image-with-buggy-append"); if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) { throw new IOException("Could not delete dfs directory '" + dfsDir + "'"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java index 458880af566..6531fe7c050 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCorruption.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; +import org.apache.hadoop.test.PathUtils; import org.apache.log4j.Level; import org.junit.Test; @@ -95,7 +96,7 @@ public class TestFileCorruption { @Test public void testLocalFileCorruption() throws Exception { Configuration conf = new HdfsConfiguration(); - Path file = new Path(System.getProperty("test.build.data"), "corruptFile"); + Path file = new Path(PathUtils.getTestDirName(getClass()), "corruptFile"); FileSystem fs = FileSystem.getLocal(conf); DataOutputStream dos = fs.create(file); dos.writeBytes("original bytes"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java index 036252ddc31..59d1615025d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSServerPorts.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.server.namenode.BackupNode; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.net.DNS; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -53,6 +54,9 @@ public class TestHDFSServerPorts { // reset default 0.0.0.0 addresses in order to avoid IPv6 problem static final String THIS_HOST = getFullHostName() + ":0"; + + private static final File TEST_DATA_DIR = PathUtils.getTestDir(TestHDFSServerPorts.class); + static { DefaultMetricsSystem.setMiniClusterMode(true); } @@ -81,13 +85,6 @@ public class TestHDFSServerPorts { } } - /** - * Get base directory these tests should run in. - */ - private String getTestingDir() { - return System.getProperty("test.build.data", "build/test/data"); - } - public NameNode startNameNode() throws IOException { return startNameNode(false); } @@ -95,8 +92,7 @@ public class TestHDFSServerPorts { * Start the namenode. 
*/ public NameNode startNameNode(boolean withService) throws IOException { - String dataDir = getTestingDir(); - hdfsDir = new File(dataDir, "dfs"); + hdfsDir = new File(TEST_DATA_DIR, "dfs"); if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'"); } @@ -119,9 +115,8 @@ public class TestHDFSServerPorts { * Start the BackupNode */ public BackupNode startBackupNode(Configuration conf) throws IOException { - String dataDir = getTestingDir(); // Set up testing environment directories - hdfsDir = new File(dataDir, "backupNode"); + hdfsDir = new File(TEST_DATA_DIR, "backupNode"); if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) { throw new IOException("Could not delete hdfs directory '" + hdfsDir + "'"); } @@ -150,8 +145,7 @@ public class TestHDFSServerPorts { */ public DataNode startDataNode(int index, Configuration config) throws IOException { - String dataDir = getTestingDir(); - File dataNodeDir = new File(dataDir, "data-" + index); + File dataNodeDir = new File(TEST_DATA_DIR, "data-" + index); config.set(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY, dataNodeDir.getPath()); String[] args = new String[] {}; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java index d9a22c10111..56bd21ef2ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpURLTimeouts.java @@ -33,16 +33,11 @@ import java.util.List; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.hdfs.web.URLUtils; -import org.junit.BeforeClass; +import org.apache.hadoop.hdfs.web.URLConnectionFactory; import org.junit.Test; public class TestHftpURLTimeouts { - @BeforeClass - public static void setup() { - URLUtils.SOCKET_TIMEOUT = 5; - } - + @Test public void testHftpSocketTimeout() throws Exception { Configuration conf = new Configuration(); @@ -51,9 +46,11 @@ public class TestHftpURLTimeouts { InetAddress.getByName(null).getHostAddress(), socket.getLocalPort(), null, null, null); - boolean timedout = false; HftpFileSystem fs = (HftpFileSystem)FileSystem.get(uri, conf); + fs.connectionFactory = new URLConnectionFactory(5); + + boolean timedout = false; try { HttpURLConnection conn = fs.openConnection("/", ""); timedout = false; @@ -69,6 +66,7 @@ public class TestHftpURLTimeouts { assertTrue("read timedout", timedout); assertTrue("connect timedout", checkConnectTimeout(fs, false)); } finally { + fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; fs.close(); } } @@ -84,6 +82,8 @@ public class TestHftpURLTimeouts { boolean timedout = false; HsftpFileSystem fs = (HsftpFileSystem)FileSystem.get(uri, conf); + fs.connectionFactory = new URLConnectionFactory(5); + try { HttpURLConnection conn = null; timedout = false; @@ -100,6 +100,7 @@ public class TestHftpURLTimeouts { assertTrue("ssl read connect timedout", timedout); assertTrue("connect timedout", checkConnectTimeout(fs, true)); } finally { + fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; fs.close(); } } @@ -121,7 +122,7 @@ public class TestHftpURLTimeouts { // https will get a read timeout due to SSL negotiation, but // a normal http will not, so need to ignore SSL read timeouts // until a connect timeout occurs - if 
(!(ignoreReadTimeout && message.equals("Read timed out"))) { + if (!(ignoreReadTimeout && "Read timed out".equals(message))) { timedout = true; assertEquals("connect timed out", message); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java index 1400f07e062..84678da2d40 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java @@ -25,8 +25,8 @@ import static org.junit.Assume.assumeTrue; import java.io.File; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.hdfs.protocol.FSConstants; -import org.junit.After; +import org.apache.hadoop.hdfs.protocol.HdfsConstants; +import org.apache.hadoop.test.PathUtils; import org.junit.Before; import org.junit.Test; @@ -43,20 +43,10 @@ public class TestMiniDFSCluster { private static final String CLUSTER_3 = "cluster3"; private static final String CLUSTER_4 = "cluster4"; private static final String CLUSTER_5 = "cluster5"; - protected String testDataPath; - protected File testDataDir; + protected File testDataPath; @Before public void setUp() { - testDataPath = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, - "build/test/data"); - testDataDir = new File(new File(testDataPath).getParentFile(), - "miniclusters"); - - - } - @After - public void tearDown() { - System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testDataPath); + testDataPath = new File(PathUtils.getTestDir(getClass()), "miniclusters"); } /** @@ -120,7 +110,7 @@ public class TestMiniDFSCluster { MiniDFSCluster cluster4 = new MiniDFSCluster.Builder(conf).build(); try { DistributedFileSystem dfs = (DistributedFileSystem) cluster4.getFileSystem(); - dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER); + dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER); cluster4.shutdown(); } finally { while(cluster4.isClusterUp()){ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java index 5077a6dfdfd..424cc77a19d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPersistBlocks.java @@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.log4j.Level; import org.junit.Test; @@ -322,7 +323,7 @@ public class TestPersistBlocks { String tarFile = System.getProperty("test.cache.data", "build/test/cache") + "/" + HADOOP_1_0_MULTIBLOCK_TGZ; - String testDir = System.getProperty("test.build.data", "build/test/data"); + String testDir = PathUtils.getTestDirName(getClass()); File dfsDir = new File(testDir, "image-1.0"); if (dfsDir.exists() && !FileUtil.fullyDelete(dfsDir)) { throw new IOException("Could not delete dfs directory '" + dfsDir + "'"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java index cfe463c4356..a7eed468e62 100644 
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/TestNNWithQJM.java @@ -174,6 +174,7 @@ public class TestNNWithQJM { public void testMismatchedNNIsRejected() throws Exception { conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, MiniDFSCluster.getBaseDirectory() + "/TestNNWithQJM/image"); + String defaultEditsDir = conf.get(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, mjc.getQuorumJournalURI("myjournal").toString()); @@ -187,7 +188,7 @@ public class TestNNWithQJM { // Reformat just the on-disk portion Configuration onDiskOnly = new Configuration(conf); - onDiskOnly.unset(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY); + onDiskOnly.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, defaultEditsDir); NameNode.format(onDiskOnly); // Start the NN - should fail because the JNs are still formatted diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java index e6e140443bf..79ca6ca72bb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java @@ -46,6 +46,7 @@ import org.apache.hadoop.metrics2.MetricsRecordBuilder; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.test.GenericTestUtils; import org.apache.hadoop.test.MetricsAsserts; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Shell; import org.junit.After; import org.junit.Before; @@ -61,13 +62,13 @@ public class TestJournalNode { private static final NamespaceInfo FAKE_NSINFO = new NamespaceInfo( 12345, "mycluster", "my-bp", 0L); + private static File TEST_BUILD_DATA = PathUtils.getTestDir(TestJournalNode.class); + private JournalNode jn; private Journal journal; private Configuration conf = new Configuration(); private IPCLoggerChannel ch; private String journalId; - private File TEST_BUILD_DATA = - new File(System.getProperty("test.build.data", "build/test/data")); static { // Avoid an error when we double-initialize JvmMetrics diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java index 79785961c91..972a785f5d6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import java.io.File; -import java.io.IOException; import java.util.Collection; import org.apache.hadoop.conf.Configuration; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java index ba6c3737266..5f15d0e2cb9 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.hdfs.server.namenode.Namesystem; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; import org.apache.log4j.Logger; @@ -92,8 +93,7 @@ public class TestReplicationPolicy { FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - File baseDir = new File(System.getProperty( - "test.build.data", "build/test/data"), "dfs/"); + File baseDir = PathUtils.getTestDir(TestReplicationPolicy.class); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(baseDir, "name").getPath()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java index 032c2c08396..c453f198e1d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java @@ -22,7 +22,6 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertTrue; import java.io.File; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; @@ -41,6 +40,9 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.net.NetworkTopology; import org.apache.hadoop.net.Node; +import org.apache.hadoop.test.PathUtils; +import org.junit.After; +import org.junit.Before; import org.junit.Test; public class TestReplicationPolicyWithNodeGroup { @@ -48,10 +50,10 @@ public class TestReplicationPolicyWithNodeGroup { private static final int NUM_OF_DATANODES = 8; private static final int NUM_OF_DATANODES_BOUNDARY = 6; private static final int NUM_OF_DATANODES_MORE_TARGETS = 12; - private static final Configuration CONF = new HdfsConfiguration(); - private static final NetworkTopology cluster; - private static final NameNode namenode; - private static final BlockPlacementPolicy replicator; + private final Configuration CONF = new HdfsConfiguration(); + private NetworkTopology cluster; + private NameNode namenode; + private BlockPlacementPolicy replicator; private static final String filename = "/dummyfile.txt"; private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] { @@ -94,27 +96,23 @@ public class TestReplicationPolicyWithNodeGroup { private final static DatanodeDescriptor NODE = new DatanodeDescriptor(DFSTestUtil.getDatanodeDescriptor("9.9.9.9", "/d2/r4/n7")); - static { - try { - FileSystem.setDefaultUri(CONF, "hdfs://localhost:0"); - CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); - // Set properties to make HDFS aware of NodeGroup. 
- CONF.set("dfs.block.replicator.classname", - "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup"); - CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, - "org.apache.hadoop.net.NetworkTopologyWithNodeGroup"); - - File baseDir = new File(System.getProperty( - "test.build.data", "build/test/data"), "dfs/"); - CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, - new File(baseDir, "name").getPath()); - - DFSTestUtil.formatNameNode(CONF); - namenode = new NameNode(CONF); - } catch (IOException e) { - e.printStackTrace(); - throw (RuntimeException)new RuntimeException().initCause(e); - } + @Before + public void setUp() throws Exception { + FileSystem.setDefaultUri(CONF, "hdfs://localhost:0"); + CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0"); + // Set properties to make HDFS aware of NodeGroup. + CONF.set("dfs.block.replicator.classname", + "org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyWithNodeGroup"); + CONF.set(CommonConfigurationKeysPublic.NET_TOPOLOGY_IMPL_KEY, + "org.apache.hadoop.net.NetworkTopologyWithNodeGroup"); + + File baseDir = PathUtils.getTestDir(TestReplicationPolicyWithNodeGroup.class); + + CONF.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, + new File(baseDir, "name").getPath()); + + DFSTestUtil.formatNameNode(CONF); + namenode = new NameNode(CONF); final BlockManager bm = namenode.getNamesystem().getBlockManager(); replicator = bm.getBlockPlacementPolicy(); cluster = bm.getDatanodeManager().getNetworkTopology(); @@ -125,6 +123,11 @@ public class TestReplicationPolicyWithNodeGroup { setupDataNodeCapacity(); } + @After + public void tearDown() throws Exception { + namenode.stop(); + } + private static void setupDataNodeCapacity() { for(int i=0; i fsImageDirs = new ArrayList(); ArrayList editsDirs = new ArrayList(); File filePath = - new File(System.getProperty("test.build.data","/tmp"), "storageDirToCheck"); + new File(PathUtils.getTestDir(getClass()), "storageDirToCheck"); assertTrue("Couldn't create directory storageDirToCheck", filePath.exists() || filePath.mkdirs()); fsImageDirs.add(filePath.toURI()); @@ -1911,9 +1912,11 @@ public class TestCheckpoint { } // Start a new NN with the same host/port. 
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) - .nameNodePort(origPort).nameNodeHttpPort(origHttpPort).format(true) - .build(); + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) + .nameNodePort(origPort) + .nameNodeHttpPort(origHttpPort) + .format(true).build(); try { secondary.doCheckpoint(); @@ -2135,7 +2138,8 @@ public class TestCheckpoint { conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_CHECK_PERIOD_KEY, 1); try { - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0) + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0) .format(true).build(); FileSystem fs = cluster.getFileSystem(); secondary = startSecondaryNameNode(conf); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java index 4330317d6ff..7c23dd55df5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestClusterId.java @@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption; import org.apache.hadoop.hdfs.server.common.Storage; import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil; import org.apache.hadoop.util.ExitUtil.ExitException; import org.junit.After; @@ -72,7 +73,7 @@ public class TestClusterId { public void setUp() throws IOException { ExitUtil.disableSystemExit(); - String baseDir = System.getProperty("test.build.data", "build/test/data"); + String baseDir = PathUtils.getTestDirName(getClass()); hdfsDir = new File(baseDir, "dfs/name"); if (hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir)) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java index f83a531b463..1be4a228895 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java @@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.util.Time; import org.apache.log4j.Level; @@ -96,9 +97,8 @@ public class TestEditLog { static final int NUM_TRANSACTIONS = 100; static final int NUM_THREADS = 100; - static final File TEST_DIR = new File( - System.getProperty("test.build.data","build/test/data")); - + static final File TEST_DIR = PathUtils.getTestDir(TestEditLog.class); + /** An edits log with 3 edits from 0.20 - the result of * a fresh namesystem followed by hadoop fs -touchz /myfile */ static final byte[] HADOOP20_SOME_EDITS = @@ -569,6 +569,7 @@ public class TestEditLog { fail("should not be able to start"); } catch (IOException e) { // expected + assertNotNull("Cause of exception should be ChecksumException", e.getCause()); assertEquals("Cause of exception should be ChecksumException", ChecksumException.class, 
e.getCause().getClass()); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java index a6e170dee71..e230d5affc6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogFileOutputStream.java @@ -24,6 +24,7 @@ import static org.junit.Assert.assertTrue; import java.io.File; import java.io.IOException; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.util.StringUtils; import org.junit.After; @@ -34,8 +35,7 @@ import org.junit.Test; * Test the EditLogFileOutputStream */ public class TestEditLogFileOutputStream { - private final static File TEST_DIR = - new File(System.getProperty("test.build.data", "/tmp")); + private final static File TEST_DIR = PathUtils.getTestDir(TestEditLogFileOutputStream.class); private static final File TEST_EDITS = new File(TEST_DIR, "testEditLogFileOutput.log"); final static int MIN_PREALLOCATION_LENGTH = diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java index 486b17c3925..610a4a2fd4c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLogJournalFailures.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.namenode.JournalSet.JournalAndStream; import org.apache.hadoop.ipc.RemoteException; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.ExitUtil.ExitException; import org.junit.After; import org.junit.Before; @@ -192,8 +193,7 @@ public class TestEditLogJournalFailures { Configuration conf = new HdfsConfiguration(); String[] nameDirs = new String[4]; for (int i = 0; i < nameDirs.length; i++) { - File nameDir = new File(System.getProperty("test.build.data"), - "name-dir" + i); + File nameDir = new File(PathUtils.getTestDir(getClass()), "name-dir" + i); nameDir.mkdirs(); nameDirs[i] = nameDir.getAbsolutePath(); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java index dd637a9212d..4db7e6a7dfa 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory; import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.log4j.Level; import org.junit.Test; @@ -60,8 +61,7 @@ public class TestFSEditLogLoader { 
((Log4JLogger)FSEditLogLoader.LOG).getLogger().setLevel(Level.ALL); } - private static final File TEST_DIR = new File( - System.getProperty("test.build.data","build/test/data")); + private static final File TEST_DIR = PathUtils.getTestDir(TestFSEditLogLoader.class); private static final int NUM_DATA_NODES = 0; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java index 754e56966d3..9aaeb74a1c3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java @@ -91,7 +91,7 @@ import static org.mockito.Mockito.*; */ public class TestFsck { static final String auditLogFile = System.getProperty("test.build.dir", - "build/test") + "/audit.log"; + "build/test") + "/TestFsck-audit.log"; // Pattern for: // allowed=true ugi=name ip=/address cmd=FSCK src=/ dst=null perm=null @@ -159,7 +159,8 @@ public class TestFsck { cluster.shutdown(); // restart the cluster; bring up namenode but not the data nodes - cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build(); + cluster = new MiniDFSCluster.Builder(conf) + .numDataNodes(0).format(false).build(); outStr = runFsck(conf, 1, true, "/"); // expect the result is corrupt assertTrue(outStr.contains(NamenodeFsck.CORRUPT_STATUS)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java index 4e4914b3ab6..5c11c1badf2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameEditsConfigs.java @@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.HdfsConfiguration; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; +import org.apache.hadoop.test.PathUtils; import org.junit.Before; import org.junit.Test; @@ -60,7 +61,7 @@ public class TestNameEditsConfigs { short replication = 3; private File base_dir = new File( - System.getProperty("test.build.data", "build/test/data"), "dfs/"); + PathUtils.getTestDir(TestNameEditsConfigs.class), "dfs"); @Before public void setUp() throws IOException { @@ -68,7 +69,7 @@ public class TestNameEditsConfigs { throw new IOException("Cannot remove directory " + base_dir); } } - + void checkImageAndEditsFilesExistence(File dir, boolean shouldHaveImages, boolean shouldHaveEdits) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java index 42f3f5764f9..30d3e71ad8d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeRecovery.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.DeleteOp; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.OpInstanceCache; import 
org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType; import org.apache.hadoop.io.IOUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.StringUtils; import org.junit.Test; @@ -57,8 +58,7 @@ import com.google.common.collect.Sets; public class TestNameNodeRecovery { private static final Log LOG = LogFactory.getLog(TestNameNodeRecovery.class); private static StartupOption recoverStartOpt = StartupOption.RECOVER; - private static final File TEST_DIR = new File( - System.getProperty("test.build.data","build/test/data")); + private static final File TEST_DIR = PathUtils.getTestDir(TestNameNodeRecovery.class); static { recoverStartOpt.setForce(MetaRecoveryContext.FORCE_ALL); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java index e73d71aff70..2012b6aabe1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java @@ -33,12 +33,14 @@ import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor; import org.apache.hadoop.hdfs.server.namenode.NameNodeResourceChecker.CheckedVolume; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.Time; import org.junit.Before; import org.junit.Test; import org.mockito.Mockito; public class TestNameNodeResourceChecker { + private final static File BASE_DIR = PathUtils.getTestDir(TestNameNodeResourceChecker.class); private Configuration conf; private File baseDir; private File nameDir; @@ -46,8 +48,7 @@ public class TestNameNodeResourceChecker { @Before public void setUp () throws IOException { conf = new Configuration(); - baseDir = new File(System.getProperty("test.build.data")); - nameDir = new File(baseDir, "resource-check-name-dir"); + nameDir = new File(BASE_DIR, "resource-check-name-dir"); nameDir.mkdirs(); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath()); } @@ -141,8 +142,8 @@ public class TestNameNodeResourceChecker { @Test public void testChecking2NameDirsOnOneVolume() throws IOException { Configuration conf = new Configuration(); - File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1"); - File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2"); + File nameDir1 = new File(BASE_DIR, "name-dir1"); + File nameDir2 = new File(BASE_DIR, "name-dir2"); nameDir1.mkdirs(); nameDir2.mkdirs(); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, @@ -162,7 +163,7 @@ public class TestNameNodeResourceChecker { @Test public void testCheckingExtraVolumes() throws IOException { Configuration conf = new Configuration(); - File nameDir = new File(System.getProperty("test.build.data"), "name-dir"); + File nameDir = new File(BASE_DIR, "name-dir"); nameDir.mkdirs(); conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, nameDir.getAbsolutePath()); conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath()); @@ -182,8 +183,8 @@ public class TestNameNodeResourceChecker { @Test public void testLowResourceVolumePolicy() throws IOException, URISyntaxException { Configuration conf = new Configuration(); - File nameDir1 = new 
File(System.getProperty("test.build.data"), "name-dir1"); - File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2"); + File nameDir1 = new File(BASE_DIR, "name-dir1"); + File nameDir2 = new File(BASE_DIR, "name-dir2"); nameDir1.mkdirs(); nameDir2.mkdirs(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java index 6d6dbdf79ce..86323ff5adb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartup.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.hdfs.util.MD5FileUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.test.GenericTestUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.StringUtils; import org.apache.log4j.Logger; import org.junit.After; @@ -404,8 +405,7 @@ public class TestStartup { Configuration conf = new Configuration(); FileSystem.setDefaultUri(conf, "hdfs://localhost:0"); conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0"); - File base_dir = new File(System.getProperty( - "test.build.data", "build/test/data"), "dfs/"); + File base_dir = new File(PathUtils.getTestDir(getClass()), "dfs/"); conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, new File(base_dir, "name").getPath()); conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java index 905e3680e0b..1a612e83ec7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStorageRestore.java @@ -391,7 +391,8 @@ public class TestStorageRestore { (new File(path3, "current").getAbsolutePath()) : path3.toString(); try { - cluster = new MiniDFSCluster.Builder(config).numDataNodes(0) + cluster = new MiniDFSCluster.Builder(config) + .numDataNodes(0) .manageNameDfsDirs(false).build(); cluster.waitActive(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java index cba634fee92..5a178d19440 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestTransferFsImage.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.http.HttpServer; import org.apache.hadoop.http.HttpServerFunctionalTest; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.test.PathUtils; import org.apache.hadoop.util.StringUtils; import org.junit.Test; import org.mockito.Mockito; @@ -48,8 +49,7 @@ import com.google.common.collect.ImmutableList; public class TestTransferFsImage { - private static final File TEST_DIR = new File( - System.getProperty("test.build.data","build/test/data")); + private static final File TEST_DIR = 
PathUtils.getTestDir(TestTransferFsImage.class); /** * Regression test for HDFS-1997. Test that, if an exception diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java index 678e03866d5..502c9de4096 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestBootstrapStandby.java @@ -56,8 +56,8 @@ public class TestBootstrapStandby { MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") - .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)) - .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002))); + .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(20001)) + .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(20002))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java index ce005b10a14..18972655765 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDFSZKFailoverController.java @@ -70,13 +70,13 @@ public class TestDFSZKFailoverController extends ClientBaseWithFixes { CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0); - conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10003); - conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10004); + conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn1", 10023); + conf.setInt(DFSConfigKeys.DFS_HA_ZKFC_PORT_KEY + ".ns1.nn2", 10024); MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") - .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001)) - .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002))); + .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10021)) + .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10022))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) .numDataNodes(0) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java index 8675fa3fc6f..8c61c9237e6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java @@ -124,8 +124,8 @@ public class TestEditLogTailer { // Have to specify IPC ports so the NNs can talk to each other. 
MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") - .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001)) - .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002))); + .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031)) + .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032))); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java index 37c0b16fabd..5ec7f7e1c1e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java @@ -76,8 +76,8 @@ public class TestFailureToReadEdits { MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") - .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)) - .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002))); + .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10041)) + .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10042))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) .numDataNodes(0) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java index 49d89592b8a..4f848dcf834 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java @@ -52,8 +52,8 @@ public class TestHAFsck { // need some HTTP ports MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ha-nn-uri-0") - .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)) - .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002))); + .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10051)) + .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10052))); MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java index dff28740690..3ff5d54dc66 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java @@ -89,8 +89,8 @@ public class TestStandbyCheckpoints { MiniDFSNNTopology topology = new MiniDFSNNTopology() .addNameservice(new MiniDFSNNTopology.NSConf("ns1") - .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001)) - .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002))); + .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061)) + .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062))); cluster = new MiniDFSCluster.Builder(conf) .nnTopology(topology) diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java index 1d189a108de..e6c9a3f3967 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes; import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper; import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags; +import org.apache.hadoop.test.PathUtils; import org.junit.Before; import org.junit.Test; @@ -53,7 +54,7 @@ public class TestOfflineEditsViewer { } private static String buildDir = - System.getProperty("test.build.data", "build/test/data"); + PathUtils.getTestDirName(TestOfflineEditsViewer.class); private static String cacheDir = System.getProperty("test.cache.data", "build/test/cache"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java index a5501d97547..c7d3b31dacf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java @@ -27,6 +27,7 @@ import java.io.FileReader; import java.io.IOException; import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement; +import org.apache.hadoop.test.PathUtils; import org.junit.Test; /** @@ -34,7 +35,7 @@ import org.junit.Test; * on predetermined inputs */ public class TestDelimitedImageVisitor { - private static String ROOT = System.getProperty("test.build.data","/tmp"); + private static String ROOT = PathUtils.getTestDirName(TestDelimitedImageVisitor.class); private static final String delim = "--"; // Record an element in the visitor and build the expected line in the output diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java index 50e816417d3..11aa3b821f0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java @@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.test.PathUtils; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -88,8 +89,7 @@ public class TestOfflineImageViewer { final static HashMap writtenFiles = new HashMap(); - private static String ROOT = System.getProperty("test.build.data", - "build/test/data"); + private static String ROOT = 
PathUtils.getTestDirName(TestOfflineImageViewer.class); // Create a populated namespace for later testing. Save its contents to a // data structure and store its fsimage location. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java index ebbb4e22701..2a9465aa90c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java @@ -30,6 +30,7 @@ import java.io.OutputStream; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSTestUtil; +import org.apache.hadoop.test.PathUtils; import org.junit.Before; import org.junit.Test; @@ -40,10 +41,7 @@ public class TestAtomicFileOutputStream { private static final String TEST_STRING = "hello world"; private static final String TEST_STRING_2 = "goodbye world"; - private static File BASE_DIR = new File( - System.getProperty("test.build.data", "build/test/data")); - private static File TEST_DIR = new File(BASE_DIR, - TestAtomicFileOutputStream.class.getName()); + private static File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class); private static File DST_FILE = new File(TEST_DIR, "test.txt"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java index 6f5b1613360..35fa46d20cd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java @@ -29,14 +29,12 @@ import java.io.IOException; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hdfs.DFSTestUtil; import org.apache.hadoop.io.MD5Hash; +import org.apache.hadoop.test.PathUtils; import org.junit.Before; import org.junit.Test; public class TestMD5FileUtils { - private static final File TEST_DIR_ROOT = new File( - System.getProperty("test.build.data","build/test/data")); - private static final File TEST_DIR = new File(TEST_DIR_ROOT, - "TestMD5FileUtils"); + private static final File TEST_DIR = PathUtils.getTestDir(TestMD5FileUtils.class); private static final File TEST_FILE = new File(TEST_DIR, "testMd5File.dat"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java index 2071f6feb87..7a007a05928 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java @@ -58,7 +58,6 @@ public class TestWebHdfsTimeouts { private static final int CLIENTS_TO_CONSUME_BACKLOG = 100; private static final int CONNECTION_BACKLOG = 1; - private static final int INITIAL_SOCKET_TIMEOUT = URLUtils.SOCKET_TIMEOUT; private static final int SHORT_SOCKET_TIMEOUT = 5; private static final int TEST_TIMEOUT = 10000; @@ -67,20 +66,22 @@ public class TestWebHdfsTimeouts { private InetSocketAddress nnHttpAddress; private ServerSocket serverSocket; private Thread serverThread; + private URLConnectionFactory connectionFactory = new 
URLConnectionFactory(SHORT_SOCKET_TIMEOUT); @Before public void setUp() throws Exception { - URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT; Configuration conf = WebHdfsTestUtil.createConf(); nnHttpAddress = NameNode.getHttpAddress(conf); serverSocket = new ServerSocket(nnHttpAddress.getPort(), CONNECTION_BACKLOG); fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf); + fs.connectionFactory = connectionFactory; clients = new ArrayList(); serverThread = null; } @After public void tearDown() throws Exception { + fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()])); IOUtils.cleanup(LOG, fs); if (serverSocket != null) { @@ -240,7 +241,7 @@ public class TestWebHdfsTimeouts { */ private void startSingleTemporaryRedirectResponseThread( final boolean consumeConnectionBacklog) { - URLUtils.SOCKET_TIMEOUT = INITIAL_SOCKET_TIMEOUT; + fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY; serverThread = new Thread() { @Override public void run() { @@ -254,7 +255,7 @@ public class TestWebHdfsTimeouts { clientSocket = serverSocket.accept(); // Immediately setup conditions for subsequent connections. - URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT; + fs.connectionFactory = connectionFactory; if (consumeConnectionBacklog) { consumeConnectionBacklog(); } diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 8dee23a467e..bc7e802a2db 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -713,7 +713,7 @@ org.apache.maven.plugins maven-surefire-plugin - 2.12.3 + 2.16 org.apache.maven.plugins @@ -827,7 +827,7 @@ org.apache.maven.plugins maven-surefire-plugin - always + false 900 -Xmx1024m -XX:+HeapDumpOnOutOfMemoryError @@ -1002,23 +1002,5 @@ - From e10dbf41bc183aa8629e067d381e1c8ba9dae95a Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Fri, 6 Sep 2013 03:14:20 +0000 Subject: [PATCH 124/153] HDFS-4491. Add/delete files missed in prior commit. 
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520482 13f79535-47bb-0310-9956-ffa450edef68 --- ...RLUtils.java => URLConnectionFactory.java} | 20 ++++--- .../org/apache/hadoop/test/PathUtils.java | 54 +++++++++++++++++++ 2 files changed, 68 insertions(+), 6 deletions(-) rename hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/{URLUtils.java => URLConnectionFactory.java} (73%) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java similarity index 73% rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java index 09feaf5bec4..54aab04e58c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLUtils.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/URLConnectionFactory.java @@ -30,19 +30,27 @@ import org.apache.hadoop.classification.InterfaceStability; */ @InterfaceAudience.LimitedPrivate({"HDFS"}) @InterfaceStability.Unstable -public class URLUtils { +public class URLConnectionFactory { /** * Timeout for socket connects and reads */ - public static int SOCKET_TIMEOUT = 1*60*1000; // 1 minute + public final static int DEFAULT_SOCKET_TIMEOUT = 1*60*1000; // 1 minute + public static final URLConnectionFactory DEFAULT_CONNECTION_FACTORY = new URLConnectionFactory(DEFAULT_SOCKET_TIMEOUT); + + private int socketTimeout; + + public URLConnectionFactory(int socketTimeout) { + this.socketTimeout = socketTimeout; + } + /** * Opens a url with read and connect timeouts * @param url to open * @return URLConnection * @throws IOException */ - public static URLConnection openConnection(URL url) throws IOException { + public URLConnection openConnection(URL url) throws IOException { URLConnection connection = url.openConnection(); setTimeouts(connection); return connection; @@ -53,8 +61,8 @@ public class URLUtils { * * @param connection URLConnection to set */ - static void setTimeouts(URLConnection connection) { - connection.setConnectTimeout(SOCKET_TIMEOUT); - connection.setReadTimeout(SOCKET_TIMEOUT); + public void setTimeouts(URLConnection connection) { + connection.setConnectTimeout(socketTimeout); + connection.setReadTimeout(socketTimeout); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java new file mode 100644 index 00000000000..2ee4aa1390b --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/PathUtils.java @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.test; + +import java.io.File; + +import org.apache.hadoop.fs.Path; + +public class PathUtils { + + public static Path getTestPath(Class caller) { + return getTestPath(caller, true); + } + + public static Path getTestPath(Class caller, boolean create) { + return new Path(getTestDirName(caller)); + } + + public static File getTestDir(Class caller) { + return getTestDir(caller, true); + } + + public static File getTestDir(Class caller, boolean create) { + File dir = new File(System.getProperty("test.build.data", "/tmp"), caller.getSimpleName()); + if (create) { + dir.mkdirs(); + } + return dir; + } + + public static String getTestDirName(Class caller) { + return getTestDirName(caller, true); + } + + public static String getTestDirName(Class caller, boolean create) { + return getTestDir(caller, create).getAbsolutePath(); + } + +} From a62839195548a632b1b6197456b9adf95127427c Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Fri, 6 Sep 2013 05:23:54 +0000 Subject: [PATCH 125/153] HDFS-5164. DeleteSnapshot should check if OperationCategory.WRITE is possible before taking write lock (contributed by Colin Patrick McCabe) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520492 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 1 + 2 files changed, 4 insertions(+) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 2015f2954f4..dd445c2fd97 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -283,6 +283,9 @@ Release 2.3.0 - UNRELEASED HDFS-4816. transitionToActive blocks if the SBN is doing checkpoint image transfer. (Andrew Wang) + HDFS-5164. deleteSnapshot should check if OperationCategory.WRITE is + possible before taking write lock. (Colin Patrick McCabe) + Release 2.1.1-beta - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 0ed66324e45..42965ed189e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -6767,6 +6767,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, return; // Return previous response } boolean success = false; + checkOperation(OperationCategory.WRITE); writeLock(); try { checkOperation(OperationCategory.WRITE); From 36d0b822ef46fcacdb773abbdd3e81386eb4d63c Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 6 Sep 2013 17:17:30 +0000 Subject: [PATCH 126/153] HDFS-5118. Provide testing support for DFSClient to drop RPC responses. Contributed by Jing Zhao.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520637 13f79535-47bb-0310-9956-ffa450edef68 --- .../io/retry/LossyRetryInvocationHandler.java | 62 ++++++++++++++++++ .../io/retry/RetryInvocationHandler.java | 2 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../org/apache/hadoop/hdfs/DFSClient.java | 33 ++++++++-- .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 6 ++ .../apache/hadoop/hdfs/NameNodeProxies.java | 64 +++++++++++++++++++ 6 files changed, 162 insertions(+), 8 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java new file mode 100644 index 00000000000..df5895553a6 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/LossyRetryInvocationHandler.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.io.retry; + +import java.lang.reflect.Method; +import java.net.UnknownHostException; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * A dummy invocation handler extending RetryInvocationHandler. It drops the + * first N number of responses. This invocation handler is only used for testing. + */ +@InterfaceAudience.Private +public class LossyRetryInvocationHandler extends RetryInvocationHandler { + private final int numToDrop; + private static final ThreadLocal RetryCount = + new ThreadLocal(); + + public LossyRetryInvocationHandler(int numToDrop, + FailoverProxyProvider proxyProvider, RetryPolicy retryPolicy) { + super(proxyProvider, retryPolicy); + this.numToDrop = numToDrop; + } + + @Override + public Object invoke(Object proxy, Method method, Object[] args) + throws Throwable { + RetryCount.set(0); + return super.invoke(proxy, method, args); + } + + @Override + protected Object invokeMethod(Method method, Object[] args) throws Throwable { + Object result = super.invokeMethod(method, args); + int retryCount = RetryCount.get(); + if (retryCount < this.numToDrop) { + RetryCount.set(++retryCount); + LOG.info("Drop the response. Current retryCount == " + retryCount); + throw new UnknownHostException("Fake Exception"); + } else { + LOG.info("retryCount == " + retryCount + + ". 
It's time to normally process the response"); + return result; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java index 974bac91eb4..51dd46a8f9e 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java @@ -63,7 +63,7 @@ public class RetryInvocationHandler implements RpcInvocationHandler { this(proxyProvider, retryPolicy, Collections.emptyMap()); } - RetryInvocationHandler(FailoverProxyProvider proxyProvider, + protected RetryInvocationHandler(FailoverProxyProvider proxyProvider, RetryPolicy defaultPolicy, Map methodNameToPolicyMap) { this.proxyProvider = proxyProvider; diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index dd445c2fd97..dd86e0e4e4a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -244,6 +244,9 @@ Release 2.3.0 - UNRELEASED NEW FEATURES + HDFS-5118. Provide testing support for DFSClient to drop RPC responses. + (jing9) + IMPROVEMENTS HDFS-4657. Limit the number of blocks logged by the NN after a block diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 511df17b472..d1cc7848916 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -27,6 +27,9 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIE import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_BLOCK_WRITE_RETRIES_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHED_CONN_RETRY_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT; @@ -44,9 +47,6 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIR import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_USE_DN_HOSTNAME_DEFAULT; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_READS; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_DROP_BEHIND_WRITES; -import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_CACHE_READAHEAD; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_EXCLUDE_NODES_CACHE_EXPIRY_INTERVAL_DEFAULT; import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT; @@ -100,6 +100,7 @@ import org.apache.hadoop.fs.MD5MD5CRC32GzipFileChecksum; import org.apache.hadoop.fs.Options; import org.apache.hadoop.fs.Options.ChecksumOpt; import org.apache.hadoop.fs.ParentNotDirectoryException; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.UnresolvedLinkException; import org.apache.hadoop.fs.VolumeId; import org.apache.hadoop.fs.permission.FsPermission; @@ -113,13 +114,13 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.protocol.HdfsBlocksMetadata; import org.apache.hadoop.hdfs.protocol.HdfsConstants; -import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType; import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.protocol.LocatedBlock; import org.apache.hadoop.hdfs.protocol.LocatedBlocks; import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException; +import org.apache.hadoop.hdfs.protocol.SnapshotAccessControlException; import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport; import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus; import org.apache.hadoop.hdfs.protocol.UnresolvedPathException; @@ -144,6 +145,7 @@ import org.apache.hadoop.io.EnumSetWritable; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.MD5Hash; import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.retry.LossyRetryInvocationHandler; import org.apache.hadoop.ipc.Client; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RemoteException; @@ -451,7 +453,11 @@ public class DFSClient implements java.io.Closeable { /** * Create a new DFSClient connected to the given nameNodeUri or rpcNamenode. - * Exactly one of nameNodeUri or rpcNamenode must be null. + * If HA is enabled and a positive value is set for + * {@link DFSConfigKeys#DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY} in the + * configuration, the DFSClient will use {@link LossyRetryInvocationHandler} + * as its RetryInvocationHandler. Otherwise one of nameNodeUri or rpcNamenode + * must be null. */ @VisibleForTesting public DFSClient(URI nameNodeUri, ClientProtocol rpcNamenode, @@ -475,7 +481,20 @@ public class DFSClient implements java.io.Closeable { this.clientName = "DFSClient_" + dfsClientConf.taskId + "_" + DFSUtil.getRandom().nextInt() + "_" + Thread.currentThread().getId(); - if (rpcNamenode != null) { + int numResponseToDrop = conf.getInt( + DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY, + DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT); + if (numResponseToDrop > 0) { + // This case is used for testing. + LOG.warn(DFSConfigKeys.DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY + + " is set to " + numResponseToDrop + + ", this hacked client will proactively drop responses"); + NameNodeProxies.ProxyAndInfo proxyInfo = NameNodeProxies + .createProxyWithLossyRetryHandler(conf, nameNodeUri, + ClientProtocol.class, numResponseToDrop); + this.dtService = proxyInfo.getDelegationTokenService(); + this.namenode = proxyInfo.getProxy(); + } else if (rpcNamenode != null) { // This case is used for testing. 
Preconditions.checkArgument(nameNodeUri == null); this.namenode = rpcNamenode; @@ -514,7 +533,7 @@ public class DFSClient implements java.io.Closeable { this.defaultWriteCachingStrategy = new CachingStrategy(writeDropBehind, readahead); } - + /** * Return the socket addresses to use with each configured * local interface. Local interfaces may be specified by IP diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index a66ec939613..b4d67ca19d2 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -497,6 +497,12 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final long DFS_NAMENODE_RETRY_CACHE_EXPIRYTIME_MILLIS_DEFAULT = 600000; // 10 minutes public static final String DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_KEY = "dfs.namenode.retrycache.heap.percent"; public static final float DFS_NAMENODE_RETRY_CACHE_HEAP_PERCENT_DEFAULT = 0.03f; + + // The number of NN response dropped by client proactively in each RPC call. + // For testing NN retry cache, we can set this property with positive value. + public static final String DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_KEY = "dfs.client.test.drop.namenode.response.number"; + public static final int DFS_CLIENT_TEST_DROP_NAMENODE_RESPONSE_NUM_DEFAULT = 0; + // Hidden configuration undocumented in hdfs-site. xml // Timeout to wait for block receiver and responder thread to stop diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java index eb745b8bb7d..41dac6a80f6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java @@ -17,10 +17,18 @@ */ package org.apache.hadoop.hdfs; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY; import java.io.IOException; import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.Proxy; import java.net.InetSocketAddress; import java.net.URI; import java.util.HashMap; @@ -48,6 +56,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider; import org.apache.hadoop.io.retry.FailoverProxyProvider; +import org.apache.hadoop.io.retry.LossyRetryInvocationHandler; import org.apache.hadoop.io.retry.RetryPolicies; import org.apache.hadoop.io.retry.RetryPolicy; import org.apache.hadoop.io.retry.RetryProxy; @@ -144,6 +153,61 @@ public class NameNodeProxies { return new ProxyAndInfo(proxy, dtService); } } + 
+ /** + * Generate a dummy namenode proxy instance that utilizes our hacked + * {@link LossyRetryInvocationHandler}. Proxy instance generated using this + * method will proactively drop RPC responses. Currently this method only + * support HA setup. IllegalStateException will be thrown if the given + * configuration is not for HA. + * + * @param config the configuration containing the required IPC + * properties, client failover configurations, etc. + * @param nameNodeUri the URI pointing either to a specific NameNode + * or to a logical nameservice. + * @param xface the IPC interface which should be created + * @param numResponseToDrop The number of responses to drop for each RPC call + * @return an object containing both the proxy and the associated + * delegation token service it corresponds to + * @throws IOException if there is an error creating the proxy + */ + @SuppressWarnings("unchecked") + public static ProxyAndInfo createProxyWithLossyRetryHandler( + Configuration config, URI nameNodeUri, Class xface, + int numResponseToDrop) throws IOException { + Preconditions.checkArgument(numResponseToDrop > 0); + Class> failoverProxyProviderClass = + getFailoverProxyProviderClass(config, nameNodeUri, xface); + if (failoverProxyProviderClass != null) { // HA case + FailoverProxyProvider failoverProxyProvider = + createFailoverProxyProvider(config, failoverProxyProviderClass, + xface, nameNodeUri); + int delay = config.getInt( + DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_KEY, + DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT); + int maxCap = config.getInt( + DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_KEY, + DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT); + int maxFailoverAttempts = config.getInt( + DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_KEY, + DFS_CLIENT_FAILOVER_MAX_ATTEMPTS_DEFAULT); + InvocationHandler dummyHandler = new LossyRetryInvocationHandler( + numResponseToDrop, failoverProxyProvider, + RetryPolicies.failoverOnNetworkException( + RetryPolicies.TRY_ONCE_THEN_FAIL, + Math.max(numResponseToDrop + 1, maxFailoverAttempts), delay, + maxCap)); + + T proxy = (T) Proxy.newProxyInstance( + failoverProxyProvider.getInterface().getClassLoader(), + new Class[] { xface }, dummyHandler); + Text dtService = HAUtil.buildTokenServiceForLogicalUri(nameNodeUri); + return new ProxyAndInfo(proxy, dtService); + } else { + throw new IllegalStateException("Currently creating proxy using " + + "LossyRetryInvocationHandler requires NN HA setup"); + } + } /** * Creates an explicitly non-HA-enabled proxy object. Most of the time you From 4065c842ab72abfd6efcf67b195cc51cfbf532e7 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Fri, 6 Sep 2013 18:16:30 +0000 Subject: [PATCH 127/153] Move HDFS-5118 to 2.1.1-beta section. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520650 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index dd86e0e4e4a..f5b2cc17093 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -244,9 +244,6 @@ Release 2.3.0 - UNRELEASED NEW FEATURES - HDFS-5118. Provide testing support for DFSClient to drop RPC responses. - (jing9) - IMPROVEMENTS HDFS-4657. 
Limit the number of blocks logged by the NN after a block @@ -324,6 +321,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5136 MNT EXPORT should give the full group list which can mount the exports (brandonli) + HDFS-5118. Provide testing support for DFSClient to drop RPC responses. + (jing9) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From fc9599811712b4e5e32304afa2f22c9ec6a2e21b Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Fri, 6 Sep 2013 18:30:00 +0000 Subject: [PATCH 128/153] Fixing CHANGES.txt for YARN-758 as it is now merged into branch-2.1-beta. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520659 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index f3cd1c30726..7fe2e0af20c 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -32,9 +32,6 @@ Release 2.3.0 - UNRELEASED BUG FIXES - YARN-758. Augment MockNM to use multiple cores (Karthik Kambatla via - Sandy Ryza) - YARN-1060. Two tests in TestFairScheduler are missing @Test annotation (Niranjan Singh via Sandy Ryza) @@ -89,6 +86,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1065. NM should provide AuxillaryService data to the container (Xuan Gong via bikas) + YARN-758. Augment MockNM to use multiple cores (Karthik Kambatla via + Sandy Ryza) + OPTIMIZATIONS BUG FIXES From 6431192c0ee00ecfe578b270889b0c7a0a9cb8c8 Mon Sep 17 00:00:00 2001 From: Colin McCabe Date: Fri, 6 Sep 2013 19:05:26 +0000 Subject: [PATCH 129/153] HDFS-4879. Add BlockedArrayList collection to avoid CMS full GCs (Contributed by Todd Lipcon) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520667 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + .../hdfs/server/namenode/FSDirectory.java | 5 +- .../hdfs/server/namenode/FSEditLogLoader.java | 4 +- .../hdfs/server/namenode/FSNamesystem.java | 17 +- .../hadoop/hdfs/server/namenode/INode.java | 3 +- .../hadoop/hdfs/util/ChunkedArrayList.java | 171 ++++++++++++++++++ .../hdfs/util/TestChunkedArrayList.java | 93 ++++++++++ 7 files changed, 281 insertions(+), 15 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index f5b2cc17093..3dbcde5d619 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -269,6 +269,9 @@ Release 2.3.0 - UNRELEASED HDFS-4491. Parallel testing HDFS. (Andrey Klochkov via cnauroth) + HDFS-4879. 
Add "blocked ArrayList" collection to avoid CMS full GCs + (Todd Lipcon via Colin Patrick McCabe) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 51642a8b23a..4af1f963fa3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -69,6 +69,7 @@ import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapsho import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot.Root; import org.apache.hadoop.hdfs.util.ByteArray; +import org.apache.hadoop.hdfs.util.ChunkedArrayList; import org.apache.hadoop.hdfs.util.ReadOnlyList; import com.google.common.annotations.VisibleForTesting; @@ -949,7 +950,7 @@ public class FSDirectory implements Closeable { if (removedDst != null) { undoRemoveDst = false; BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); - List removedINodes = new ArrayList(); + List removedINodes = new ChunkedArrayList(); filesDeleted = removedDst.cleanSubtree(null, dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true) .get(Quota.NAMESPACE); @@ -1363,7 +1364,7 @@ public class FSDirectory implements Closeable { QuotaExceededException, SnapshotAccessControlException { assert hasWriteLock(); BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); - List removedINodes = new ArrayList(); + List removedINodes = new ChunkedArrayList(); final INodesInPath inodesInPath = rootDir.getINodesInPath4Write( normalizePath(src), false); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java index f19b15cd9f6..03a1dbc1fbd 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogLoader.java @@ -22,7 +22,6 @@ import static org.apache.hadoop.util.Time.now; import java.io.FilterInputStream; import java.io.IOException; import java.io.InputStream; -import java.util.ArrayList; import java.util.Arrays; import java.util.EnumMap; import java.util.List; @@ -75,6 +74,7 @@ import org.apache.hadoop.hdfs.server.namenode.startupprogress.Phase; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress; import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress.Counter; import org.apache.hadoop.hdfs.server.namenode.startupprogress.Step; +import org.apache.hadoop.hdfs.util.ChunkedArrayList; import org.apache.hadoop.hdfs.util.Holder; import com.google.common.base.Joiner; @@ -582,7 +582,7 @@ public class FSEditLogLoader { case OP_DELETE_SNAPSHOT: { DeleteSnapshotOp deleteSnapshotOp = (DeleteSnapshotOp) op; BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); - List removedINodes = new ArrayList(); + List removedINodes = new ChunkedArrayList(); fsNamesys.getSnapshotManager().deleteSnapshot( deleteSnapshotOp.snapshotRoot, deleteSnapshotOp.snapshotName, collectedBlocks, removedINodes); diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 42965ed189e..884a14b8959 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -201,6 +201,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand; import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration; import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo; import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo; +import org.apache.hadoop.hdfs.util.ChunkedArrayList; import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.io.Text; import org.apache.hadoop.ipc.RetryCache; @@ -3130,7 +3131,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, throws AccessControlException, SafeModeException, UnresolvedLinkException, IOException { BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); - List removedINodes = new ArrayList(); + List removedINodes = new ChunkedArrayList(); FSPermissionChecker pc = getPermissionChecker(); checkOperation(OperationCategory.WRITE); byte[][] pathComponents = FSDirectory.getPathComponentsForReservedPath(src); @@ -3184,21 +3185,17 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * of blocks that need to be removed from blocksMap */ void removeBlocks(BlocksMapUpdateInfo blocks) { - int start = 0; - int end = 0; List toDeleteList = blocks.getToDeleteList(); - while (start < toDeleteList.size()) { - end = BLOCK_DELETION_INCREMENT + start; - end = end > toDeleteList.size() ? 
toDeleteList.size() : end; + Iterator iter = toDeleteList.iterator(); + while (iter.hasNext()) { writeLock(); try { - for (int i = start; i < end; i++) { - blockManager.removeBlock(toDeleteList.get(i)); + for (int i = 0; i < BLOCK_DELETION_INCREMENT && iter.hasNext(); i++) { + blockManager.removeBlock(iter.next()); } } finally { writeUnlock(); } - start = end; } } @@ -6778,7 +6775,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats, checkOwner(pc, snapshotRoot); BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); - List removedINodes = new ArrayList(); + List removedINodes = new ChunkedArrayList(); dir.writeLock(); try { snapshotManager.deleteSnapshot(snapshotRoot, snapshotName, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 94ad7a8479c..977c801013e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName; import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot; import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot; +import org.apache.hadoop.hdfs.util.ChunkedArrayList; import org.apache.hadoop.hdfs.util.Diff; import org.apache.hadoop.util.StringUtils; @@ -707,7 +708,7 @@ public abstract class INode implements INodeAttributes, Diff.Element { } public BlocksMapUpdateInfo() { - toDeleteList = new ArrayList(); + toDeleteList = new ChunkedArrayList(); } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java new file mode 100644 index 00000000000..89a0db6eb47 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/ChunkedArrayList.java @@ -0,0 +1,171 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import java.util.AbstractList; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.classification.InterfaceAudience; + +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Preconditions; +import com.google.common.collect.Iterables; +import com.google.common.collect.Lists; + +/** + * Simplified List implementation which stores elements as a list + * of chunks, each chunk having a maximum size. 
This improves over + * using an ArrayList in that creating a large list will never require + * a large amount of contiguous heap space -- thus reducing the likelihood + * of triggering a CMS compaction pause due to heap fragmentation. + * + * The first chunks allocated are small, but each additional chunk is + * 50% larger than the previous, ramping up to a configurable maximum + * chunk size. Reasonable defaults are provided which should be a good + * balance between not making any large allocations while still retaining + * decent performance. + * + * This currently only supports a small subset of List operations -- + * namely addition and iteration. + */ +@InterfaceAudience.Private +public class ChunkedArrayList extends AbstractList { + + /** + * The chunks which make up the full list. + */ + private final List> chunks = Lists.newArrayList(); + + /** + * Cache of the last element in the 'chunks' array above. + * This speeds up the add operation measurably. + */ + private List lastChunk = null; + + /** + * The capacity with which the last chunk was allocated. + */ + private int lastChunkCapacity; + + /** + * The capacity of the first chunk to allocate in a cleared list. + */ + private final int initialChunkCapacity; + + /** + * The maximum number of elements for any chunk. + */ + private final int maxChunkSize; + + /** + * Total number of elements in the list. + */ + private int size; + + /** + * Default initial size is 6 elements, since typical minimum object + * size is 64 bytes, and this leaves enough space for the object + * header. + */ + private static final int DEFAULT_INITIAL_CHUNK_CAPACITY = 6; + + /** + * Default max size is 8K elements - which, at 8 bytes per element + * should be about 64KB -- small enough to easily fit in contiguous + * free heap space even with a fair amount of fragmentation. 
+ */ + private static final int DEFAULT_MAX_CHUNK_SIZE = 8*1024; + + + public ChunkedArrayList() { + this(DEFAULT_INITIAL_CHUNK_CAPACITY, DEFAULT_MAX_CHUNK_SIZE); + } + + /** + * @param initialChunkCapacity the capacity of the first chunk to be + * allocated + * @param maxChunkSize the maximum size of any chunk allocated + */ + public ChunkedArrayList(int initialChunkCapacity, int maxChunkSize) { + Preconditions.checkArgument(maxChunkSize >= initialChunkCapacity); + this.initialChunkCapacity = initialChunkCapacity; + this.maxChunkSize = maxChunkSize; + } + + @Override + public Iterator iterator() { + return Iterables.concat(chunks).iterator(); + } + + @Override + public boolean add(T e) { + if (lastChunk == null) { + addChunk(initialChunkCapacity); + } else if (lastChunk.size() >= lastChunkCapacity) { + int newCapacity = lastChunkCapacity + (lastChunkCapacity >> 1); + addChunk(Math.min(newCapacity, maxChunkSize)); + } + size++; + return lastChunk.add(e); + } + + @Override + public void clear() { + chunks.clear(); + lastChunk = null; + lastChunkCapacity = 0; + size = 0; + } + + private void addChunk(int capacity) { + lastChunk = Lists.newArrayListWithCapacity(capacity); + chunks.add(lastChunk); + lastChunkCapacity = capacity; + } + + @Override + public boolean isEmpty() { + return size == 0; + } + + @Override + public int size() { + return size; + } + + @VisibleForTesting + int getNumChunks() { + return chunks.size(); + } + + @VisibleForTesting + int getMaxChunkSize() { + int size = 0; + for (List chunk : chunks) { + size = Math.max(size, chunk.size()); + } + return size; + } + + @Override + public T get(int arg0) { + throw new UnsupportedOperationException( + this.getClass().getName() + " does not support random access"); + } +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java new file mode 100644 index 00000000000..a1e49ccee28 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java @@ -0,0 +1,93 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.util; + +import static org.junit.Assert.*; + +import java.util.ArrayList; + +import org.junit.Test; + +import com.google.common.base.Stopwatch; + +public class TestChunkedArrayList { + + @Test + public void testBasics() { + final int N_ELEMS = 100000; + ChunkedArrayList l = new ChunkedArrayList(); + assertTrue(l.isEmpty()); + // Insert a bunch of elements. + for (int i = 0; i < N_ELEMS; i++) { + l.add(i); + } + assertFalse(l.isEmpty()); + assertEquals(N_ELEMS, l.size()); + + // Check that it got chunked. 
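// Usage sketch (illustrative; mirrors only the operations the class supports):
// ChunkedArrayList is meant for collect-then-iterate workloads such as the
// BlocksMapUpdateInfo/removedINodes call sites changed above. Only add,
// iteration, size/isEmpty and clear are implemented; random access throws
// UnsupportedOperationException. Chunk capacities start at 6 and grow by
// roughly 1.5x per chunk, capped at 8 * 1024, per the arithmetic in add().
import java.util.List;
import org.apache.hadoop.hdfs.util.ChunkedArrayList;

public class ChunkedArrayListSketch {
  public static void main(String[] args) {
    List<String> names = new ChunkedArrayList<String>();
    names.add("a");
    names.add("b");
    for (String name : names) {
      System.out.println(name);   // iteration walks the chunks in order
    }
    // names.get(0) would throw UnsupportedOperationException.
  }
}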
+ assertTrue(l.getNumChunks() > 10); + assertEquals(8192, l.getMaxChunkSize()); + } + + @Test + public void testIterator() { + ChunkedArrayList l = new ChunkedArrayList(); + for (int i = 0; i < 30000; i++) { + l.add(i); + } + + int i = 0; + for (int fromList : l) { + assertEquals(i, fromList); + i++; + } + } + + @Test + public void testPerformance() { + String obj = "hello world"; + + final int numElems = 1000000; + final int numTrials = 5; + + for (int trial = 0; trial < numTrials; trial++) { + System.gc(); + { + ArrayList arrayList = new ArrayList(); + Stopwatch sw = new Stopwatch(); + sw.start(); + for (int i = 0; i < numElems; i++) { + arrayList.add(obj); + } + System.out.println(" ArrayList " + sw.elapsedMillis()); + } + + // test ChunkedArrayList + System.gc(); + { + ChunkedArrayList chunkedList = new ChunkedArrayList(); + Stopwatch sw = new Stopwatch(); + sw.start(); + for (int i = 0; i < numElems; i++) { + chunkedList.add(obj); + } + System.out.println("ChunkedArrayList " + sw.elapsedMillis()); + } + } + } +} From 6cfcd391652ffe03fa95fdf2e29b1d10fcae4ea6 Mon Sep 17 00:00:00 2001 From: Jonathan Turner Eagles Date: Fri, 6 Sep 2013 19:40:02 +0000 Subject: [PATCH 130/153] HADOOP-8704. add request logging to jetty/httpserver (jeagles) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520674 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 + .../src/main/conf/log4j.properties | 25 +++++ .../apache/hadoop/http/HttpRequestLog.java | 94 +++++++++++++++++++ .../hadoop/http/HttpRequestLogAppender.java | 62 ++++++++++++ .../org/apache/hadoop/http/HttpServer.java | 16 +++- .../hadoop/http/HttpServerFunctionalTest.java | 6 +- .../hadoop/http/TestHttpRequestLog.java | 47 ++++++++++ .../http/TestHttpRequestLogAppender.java | 37 ++++++++ .../hadoop/http/TestHttpServerLifecycle.java | 22 +++++ 9 files changed, 309 insertions(+), 2 deletions(-) create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java create mode 100644 hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLogAppender.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLog.java create mode 100644 hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLogAppender.java diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index c2beef64124..22ea0bee75c 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -2117,6 +2117,8 @@ Release 0.23.10 - UNRELEASED HADOOP-9686. Easy access to final parameters in Configuration (Jason Lowe via jeagles) + HADOOP-8704. 
add request logging to jetty/httpserver (jeagles) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties index d436db9df3b..8ef5eb4635b 100644 --- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties +++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties @@ -240,3 +240,28 @@ log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n #log4j.appender.HSAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n #log4j.appender.HSAUDIT.DatePattern=.yyyy-MM-dd +# Http Server Request Logs +#log4j.logger.http.requests.namenode=INFO,namenoderequestlog +#log4j.appender.namenoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender +#log4j.appender.namenoderequestlog.Filename=${hadoop.log.dir}/jetty-namenode-yyyy_mm_dd.log +#log4j.appender.namenoderequestlog.RetainDays=3 + +#log4j.logger.http.requests.datanode=INFO,datanoderequestlog +#log4j.appender.datanoderequestlog=org.apache.hadoop.http.HttpRequestLogAppender +#log4j.appender.datanoderequestlog.Filename=${hadoop.log.dir}/jetty-datanode-yyyy_mm_dd.log +#log4j.appender.datanoderequestlog.RetainDays=3 + +#log4j.logger.http.requests.resourcemanager=INFO,resourcemanagerrequestlog +#log4j.appender.resourcemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender +#log4j.appender.resourcemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-resourcemanager-yyyy_mm_dd.log +#log4j.appender.resourcemanagerrequestlog.RetainDays=3 + +#log4j.logger.http.requests.jobhistory=INFO,jobhistoryrequestlog +#log4j.appender.jobhistoryrequestlog=org.apache.hadoop.http.HttpRequestLogAppender +#log4j.appender.jobhistoryrequestlog.Filename=${hadoop.log.dir}/jetty-jobhistory-yyyy_mm_dd.log +#log4j.appender.jobhistoryrequestlog.RetainDays=3 + +#log4j.logger.http.requests.nodemanager=INFO,nodemanagerrequestlog +#log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender +#log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log +#log4j.appender.nodemanagerrequestlog.RetainDays=3 diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java new file mode 100644 index 00000000000..eb8968bcc49 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLog.java @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.http; + +import java.util.HashMap; + +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogConfigurationException; +import org.apache.commons.logging.LogFactory; +import org.apache.log4j.Appender; +import org.apache.log4j.Logger; +import org.mortbay.jetty.NCSARequestLog; +import org.mortbay.jetty.RequestLog; + +/** + * RequestLog object for use with Http + */ +public class HttpRequestLog { + + public static final Log LOG = LogFactory.getLog(HttpRequestLog.class); + private static final HashMap serverToComponent; + + static { + serverToComponent = new HashMap(); + serverToComponent.put("cluster", "resourcemanager"); + serverToComponent.put("hdfs", "namenode"); + serverToComponent.put("node", "nodemanager"); + } + + public static RequestLog getRequestLog(String name) { + + String lookup = serverToComponent.get(name); + if (lookup != null) { + name = lookup; + } + String loggerName = "http.requests." + name; + String appenderName = name + "requestlog"; + Log logger = LogFactory.getLog(loggerName); + + if (logger instanceof Log4JLogger) { + Log4JLogger httpLog4JLog = (Log4JLogger)logger; + Logger httpLogger = httpLog4JLog.getLogger(); + Appender appender = null; + + try { + appender = httpLogger.getAppender(appenderName); + } catch (LogConfigurationException e) { + LOG.warn("Http request log for " + loggerName + + " could not be created"); + throw e; + } + + if (appender == null) { + LOG.info("Http request log for " + loggerName + + " is not defined"); + return null; + } + + if (appender instanceof HttpRequestLogAppender) { + HttpRequestLogAppender requestLogAppender + = (HttpRequestLogAppender)appender; + NCSARequestLog requestLog = new NCSARequestLog(); + requestLog.setFilename(requestLogAppender.getFilename()); + requestLog.setRetainDays(requestLogAppender.getRetainDays()); + return requestLog; + } + else { + LOG.warn("Jetty request log for " + loggerName + + " was of the wrong class"); + return null; + } + } + else { + LOG.warn("Jetty request log can only be enabled using Log4j"); + return null; + } + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLogAppender.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLogAppender.java new file mode 100644 index 00000000000..eda1d1fee40 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpRequestLogAppender.java @@ -0,0 +1,62 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.http; + +import org.apache.log4j.spi.LoggingEvent; +import org.apache.log4j.AppenderSkeleton; + +/** + * Log4j Appender adapter for HttpRequestLog + */ +public class HttpRequestLogAppender extends AppenderSkeleton { + + private String filename; + private int retainDays; + + public HttpRequestLogAppender() { + } + + public void setRetainDays(int retainDays) { + this.retainDays = retainDays; + } + + public int getRetainDays() { + return retainDays; + } + + public void setFilename(String filename) { + this.filename = filename; + } + + public String getFilename() { + return filename; + } + + @Override + public void append(LoggingEvent event) { + } + + @Override + public void close() { + } + + @Override + public boolean requiresLayout() { + return false; + } +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index 50582065473..b28e59424ed 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -67,9 +67,12 @@ import org.mortbay.io.Buffer; import org.mortbay.jetty.Connector; import org.mortbay.jetty.Handler; import org.mortbay.jetty.MimeTypes; +import org.mortbay.jetty.RequestLog; import org.mortbay.jetty.Server; import org.mortbay.jetty.handler.ContextHandler; import org.mortbay.jetty.handler.ContextHandlerCollection; +import org.mortbay.jetty.handler.RequestLogHandler; +import org.mortbay.jetty.handler.HandlerCollection; import org.mortbay.jetty.nio.SelectChannelConnector; import org.mortbay.jetty.security.SslSocketConnector; import org.mortbay.jetty.servlet.Context; @@ -355,7 +358,18 @@ public class HttpServer implements FilterContainer { final String appDir = getWebAppsPath(name); ContextHandlerCollection contexts = new ContextHandlerCollection(); - webServer.setHandler(contexts); + RequestLog requestLog = HttpRequestLog.getRequestLog(name); + + if (requestLog != null) { + RequestLogHandler requestLogHandler = new RequestLogHandler(); + requestLogHandler.setRequestLog(requestLog); + HandlerCollection handlers = new HandlerCollection(); + handlers.setHandlers(new Handler[] {requestLogHandler, contexts}); + webServer.setHandler(handlers); + } + else { + webServer.setHandler(contexts); + } webAppContext = new WebAppContext(); webAppContext.setDisplayName(name); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java index 52d569d6e6d..ee86ebcdd0d 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/HttpServerFunctionalTest.java @@ -101,8 +101,12 @@ public class HttpServerFunctionalTest extends Assert { String webapps = System.getProperty(TEST_BUILD_WEBAPPS, BUILD_WEBAPPS_DIR); File testWebappDir = new File(webapps + File.separatorChar + TEST); + try { if (!testWebappDir.exists()) { - fail("Test webapp dir " + testWebappDir + " missing"); + fail("Test webapp dir " + testWebappDir.getCanonicalPath() + " missing"); + } + } + catch (IOException e) { } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLog.java 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLog.java new file mode 100644 index 00000000000..23e0d3e8a1e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLog.java @@ -0,0 +1,47 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.http; + +import org.apache.log4j.Logger; +import org.junit.Test; +import org.mortbay.jetty.NCSARequestLog; +import org.mortbay.jetty.RequestLog; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertNull; + +public class TestHttpRequestLog { + + @Test + public void testAppenderUndefined() { + RequestLog requestLog = HttpRequestLog.getRequestLog("test"); + assertNull("RequestLog should be null", requestLog); + } + + @Test + public void testAppenderDefined() { + HttpRequestLogAppender requestLogAppender = new HttpRequestLogAppender(); + requestLogAppender.setName("testrequestlog"); + Logger.getLogger("http.requests.test").addAppender(requestLogAppender); + RequestLog requestLog = HttpRequestLog.getRequestLog("test"); + Logger.getLogger("http.requests.test").removeAppender(requestLogAppender); + assertNotNull("RequestLog should not be null", requestLog); + assertEquals("Class mismatch", NCSARequestLog.class, requestLog.getClass()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLogAppender.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLogAppender.java new file mode 100644 index 00000000000..e84bee06e6e --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpRequestLogAppender.java @@ -0,0 +1,37 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.http; + +import org.junit.Test; + +import static org.junit.Assert.assertEquals; + +public class TestHttpRequestLogAppender { + + @Test + public void testParameterPropagation() { + + HttpRequestLogAppender requestLogAppender = new HttpRequestLogAppender(); + requestLogAppender.setFilename("jetty-namenode-yyyy_mm_dd.log"); + requestLogAppender.setRetainDays(17); + assertEquals("Filename mismatch", "jetty-namenode-yyyy_mm_dd.log", + requestLogAppender.getFilename()); + assertEquals("Retain days mismatch", 17, + requestLogAppender.getRetainDays()); + } +} diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java index 27dd67f39c5..d7330e87140 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServerLifecycle.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.http; +import org.apache.log4j.Logger; import org.junit.Test; public class TestHttpServerLifecycle extends HttpServerFunctionalTest { @@ -66,6 +67,27 @@ public class TestHttpServerLifecycle extends HttpServerFunctionalTest { stop(server); } + /** + * Test that the server with request logging enabled + * + * @throws Throwable on failure + */ + @Test + public void testStartedServerWithRequestLog() throws Throwable { + HttpRequestLogAppender requestLogAppender = new HttpRequestLogAppender(); + requestLogAppender.setName("httprequestlog"); + requestLogAppender.setFilename(System.getProperty("test.build.data", "/tmp/") + + "jetty-name-yyyy_mm_dd.log"); + Logger.getLogger(HttpServer.class.getName() + ".test").addAppender(requestLogAppender); + HttpServer server = null; + server = createTestServer(); + assertNotLive(server); + server.start(); + assertAlive(server); + stop(server); + Logger.getLogger(HttpServer.class.getName() + ".test").removeAppender(requestLogAppender); + } + /** * Assert that the result of {@link HttpServer#toString()} contains the specific text * @param server server to examine From efc1048ffe10695666cc70be83c2b51cab8cdf0a Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Fri, 6 Sep 2013 22:57:44 +0000 Subject: [PATCH 131/153] Revert HADOOP-9877 because of breakage reported in HADOOP-9912 git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520713 13f79535-47bb-0310-9956-ffa450edef68 --- .../hadoop-common/CHANGES.txt | 2 - .../java/org/apache/hadoop/fs/Globber.java | 56 +++---------------- .../fs/FileContextMainOperationsBaseTest.java | 15 ----- .../hadoop/fs/TestFsShellReturnCode.java | 20 ------- .../fs/TestHDFSFileContextMainOperations.java | 11 ---- 5 files changed, 8 insertions(+), 96 deletions(-) diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 22ea0bee75c..bc130ff364e 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -357,8 +357,6 @@ Release 2.3.0 - UNRELEASED HADOOP-9865. FileContext#globStatus has a regression with respect to relative path. (Chuan Lin via Colin Patrick McCabe) - HADOOP-9877. Fix listing of snapshot directories in globStatus. - (Binglin Chang via Andrew Wang) HADOOP-9909. org.apache.hadoop.fs.Stat should permit other LANG. 
(Shinichi Yamashita via Andrew Wang) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java index b0bd8490715..bae801ef0a3 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Globber.java @@ -62,18 +62,6 @@ class Globber { } } - private FileStatus getFileLinkStatus(Path path) { - try { - if (fs != null) { - return fs.getFileLinkStatus(path); - } else { - return fc.getFileLinkStatus(path); - } - } catch (IOException e) { - return null; - } - } - private FileStatus[] listStatus(Path path) { try { if (fs != null) { @@ -134,18 +122,6 @@ class Globber { return authority ; } - /** - * The glob filter builds a regexp per path component. If the component - * does not contain a shell metachar, then it falls back to appending the - * raw string to the list of built up paths. This raw path needs to have - * the quoting removed. Ie. convert all occurrences of "\X" to "X" - * @param name of the path component - * @return the unquoted path component - */ - private static String unquotePathComponent(String name) { - return name.replaceAll("\\\\(.)", "$1"); - } - public FileStatus[] glob() throws IOException { // First we get the scheme and authority of the pattern that was passed // in. @@ -210,30 +186,14 @@ class Globber { resolvedCandidate.isDirectory() == false) { continue; } - // For components without pattern, we get its FileStatus directly - // using getFileLinkStatus for two reasons: - // 1. It should be faster to only get FileStatus needed rather than - // get all children. - // 2. Some special filesystem directories (e.g. HDFS snapshot - // directories) are not returned by listStatus, but do exist if - // checked explicitly via getFileLinkStatus. - if (globFilter.hasPattern()) { - FileStatus[] children = listStatus(candidate.getPath()); - for (FileStatus child : children) { - // Set the child path based on the parent path. - // This keeps the symlinks in our path. - child.setPath(new Path(candidate.getPath(), - child.getPath().getName())); - if (globFilter.accept(child.getPath())) { - newCandidates.add(child); - } - } - } else { - Path p = new Path(candidate.getPath(), unquotePathComponent(component)); - FileStatus s = getFileLinkStatus(p); - if (s != null) { - s.setPath(p); - newCandidates.add(s); + FileStatus[] children = listStatus(candidate.getPath()); + for (FileStatus child : children) { + // Set the child path based on the parent path. + // This keeps the symlinks in our path. 
+ child.setPath(new Path(candidate.getPath(), + child.getPath().getName())); + if (globFilter.accept(child.getPath())) { + newCandidates.add(child); } } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java index 877a491bf9b..354f7aabfd6 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextMainOperationsBaseTest.java @@ -30,7 +30,6 @@ import org.apache.hadoop.fs.Options.Rename; import org.apache.hadoop.fs.permission.FsPermission; import org.junit.After; import org.junit.Assert; -import org.junit.Assume; import org.junit.Before; import org.junit.Test; @@ -633,20 +632,6 @@ public abstract class FileContextMainOperationsBaseTest { filteredPaths)); } - protected Path getHiddenPathForTest() { - return null; - } - - @Test - public void testGlobStatusFilterWithHiddenPathTrivialFilter() - throws Exception { - Path hidden = getHiddenPathForTest(); - Assume.assumeNotNull(hidden); - FileStatus[] filteredPaths = fc.util().globStatus(hidden, DEFAULT_FILTER); - Assert.assertNotNull(filteredPaths); - Assert.assertEquals(1, filteredPaths.length); - } - @Test public void testWriteReadAndDeleteEmptyFile() throws Exception { writeReadAndDelete(0); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java index 2fff29e38d4..dcc19df3d4e 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellReturnCode.java @@ -517,26 +517,6 @@ public class TestFsShellReturnCode { } return stat; } - - @Override - public FileStatus getFileLinkStatus(Path p) throws IOException { - String f = makeQualified(p).toString(); - FileStatus stat = super.getFileLinkStatus(p); - - stat.getPermission(); - if (owners.containsKey(f)) { - stat.setOwner("STUB-"+owners.get(f)); - } else { - stat.setOwner("REAL-"+stat.getOwner()); - } - if (groups.containsKey(f)) { - stat.setGroup("STUB-"+groups.get(f)); - } else { - stat.setGroup("REAL-"+stat.getGroup()); - } - return stat; - } - } static class MyFsShell extends FsShell { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java index 8f5f14db614..6388bdd9e7a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java @@ -59,9 +59,6 @@ public class TestHDFSFileContextMainOperations extends defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); - // Make defaultWorkingDirectory snapshottable to enable - // testGlobStatusFilterWithHiddenPathTrivialFilter - cluster.getFileSystem().allowSnapshot(defaultWorkingDirectory); } private static void restartCluster() throws IOException, LoginException { @@ -76,9 
+73,6 @@ public class TestHDFSFileContextMainOperations extends defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + UserGroupInformation.getCurrentUser().getShortUserName())); fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true); - // Make defaultWorkingDirectory snapshottable to enable - // testGlobStatusFilterWithHiddenPathTrivialFilter - cluster.getFileSystem().allowSnapshot(defaultWorkingDirectory); } @AfterClass @@ -98,11 +92,6 @@ public class TestHDFSFileContextMainOperations extends super.tearDown(); } - @Override - protected Path getHiddenPathForTest() { - return new Path(defaultWorkingDirectory, ".snapshot"); - } - @Override protected Path getDefaultWorkingDirectory() { return defaultWorkingDirectory; From f35983b8056b7bd9ac4685acabef53f4dd0e355e Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sat, 7 Sep 2013 05:43:04 +0000 Subject: [PATCH 132/153] YARN-1107. Fixed a bug in ResourceManager because of which RM in secure mode fails to restart. Contributed by Omkar Vinit Joshi. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520726 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../resourcemanager/ClientRMService.java | 9 +-- .../server/resourcemanager/RMContext.java | 10 +++ .../server/resourcemanager/RMContextImpl.java | 24 +++++++ .../resourcemanager/ResourceManager.java | 23 +++---- .../security/DelegationTokenRenewer.java | 67 ++++++++++++++----- .../server/resourcemanager/TestRMRestart.java | 38 ++++++++--- .../security/TestDelegationTokenRenewer.java | 35 +++++++++- 8 files changed, 160 insertions(+), 49 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 7fe2e0af20c..9501969ebf5 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -156,6 +156,9 @@ Release 2.1.1-beta - UNRELEASED need more than a node's total capability were incorrectly allocated on that node causing apps to hang. (Omkar Vinit Joshi via vinodkv) + YARN-1107. Fixed a bug in ResourceManager because of which RM in secure mode + fails to restart. 
(Omkar Vinit Joshi via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index d2888e77da0..11248bad48c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -43,10 +43,10 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; -import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; -import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsRequest; +import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsRequest; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterMetricsResponse; import org.apache.hadoop.yarn.api.protocolrecords.GetClusterNodesRequest; @@ -78,7 +78,6 @@ import org.apache.hadoop.yarn.api.records.YarnClusterMetrics; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; import org.apache.hadoop.yarn.exceptions.YarnException; -import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.RPCUtil; @@ -88,7 +87,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstant import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; @@ -160,9 +158,6 @@ public class ClientRMService extends AbstractService implements this.server.start(); clientBindAddress = conf.updateConnectAddr(YarnConfiguration.RM_ADDRESS, server.getListenerAddress()); - // enable RM to short-circuit token operations directly to itself - RMDelegationTokenIdentifier.Renewer.setSecretManager( - rmDTSecretManager, clientBindAddress); super.serviceStart(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java index b29da141ed6..28101cc27ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java @@ -33,6 +33,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSec import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; /** * Context of the ResourceManager. @@ -64,4 +65,13 @@ public interface RMContext { NMTokenSecretManagerInRM getNMTokenSecretManager(); ClientToAMTokenSecretManagerInRM getClientToAMTokenSecretManager(); + + void setClientRMService(ClientRMService clientRMService); + + ClientRMService getClientRMService(); + + RMDelegationTokenSecretManager getRMDelegationTokenSecretManager(); + + void setRMDelegationTokenSecretManager( + RMDelegationTokenSecretManager delegationTokenSecretManager); } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java index f40453b5bbe..d2592ed445c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContextImpl.java @@ -36,6 +36,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSec import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager; import com.google.common.annotations.VisibleForTesting; @@ -61,6 +62,8 @@ public class RMContextImpl implements RMContext { private final RMContainerTokenSecretManager containerTokenSecretManager; private final NMTokenSecretManagerInRM nmTokenSecretManager; private final ClientToAMTokenSecretManagerInRM clientToAMTokenSecretManager; + private ClientRMService clientRMService; + private RMDelegationTokenSecretManager rmDelegationTokenSecretManager; public RMContextImpl(Dispatcher rmDispatcher, RMStateStore store, @@ -178,4 +181,25 @@ public class RMContextImpl implements RMContext { public void setStateStore(RMStateStore store) { stateStore = store; } + + @Override + public ClientRMService getClientRMService() { + return this.clientRMService; + } + + @Override + public void setClientRMService(ClientRMService clientRMService) { + 
this.clientRMService = clientRMService; + } + + @Override + public RMDelegationTokenSecretManager getRMDelegationTokenSecretManager() { + return this.rmDelegationTokenSecretManager; + } + + @Override + public void setRMDelegationTokenSecretManager( + RMDelegationTokenSecretManager delegationTokenSecretManager) { + this.rmDelegationTokenSecretManager = delegationTokenSecretManager; + } } \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index c0b372a753c..ee418c1937c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -169,11 +169,6 @@ public class ResourceManager extends CompositeService implements Recoverable { AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor(); addService(amFinishingMonitor); - if (UserGroupInformation.isSecurityEnabled()) { - this.delegationTokenRenewer = createDelegationTokenRenewer(); - addService(delegationTokenRenewer); - } - this.containerTokenSecretManager = createContainerTokenSecretManager(conf); this.nmTokenSecretManager = createNMTokenSecretManager(conf); @@ -200,6 +195,10 @@ public class ResourceManager extends CompositeService implements Recoverable { ExitUtil.terminate(1, e); } + if (UserGroupInformation.isSecurityEnabled()) { + this.delegationTokenRenewer = createDelegationTokenRenewer(); + } + this.rmContext = new RMContextImpl(this.rmDispatcher, rmStore, this.containerAllocationExpirer, amLivelinessMonitor, @@ -260,7 +259,9 @@ public class ResourceManager extends CompositeService implements Recoverable { this.rmDispatcher.register(RMAppManagerEventType.class, this.rmAppManager); this.rmDTSecretManager = createRMDelegationTokenSecretManager(this.rmContext); + rmContext.setRMDelegationTokenSecretManager(this.rmDTSecretManager); clientRM = createClientRMService(); + rmContext.setClientRMService(clientRM); addService(clientRM); adminService = createAdminService(clientRM, masterService, resourceTracker); @@ -271,7 +272,10 @@ public class ResourceManager extends CompositeService implements Recoverable { this.applicationMasterLauncher); addService(applicationMasterLauncher); - + if (UserGroupInformation.isSecurityEnabled()) { + addService(delegationTokenRenewer); + delegationTokenRenewer.setRMContext(rmContext); + } new RMNMInfo(this.rmContext, this.scheduler); super.serviceInit(conf); @@ -620,13 +624,6 @@ public class ResourceManager extends CompositeService implements Recoverable { this.containerTokenSecretManager.start(); this.nmTokenSecretManager.start(); - // Explicitly start DTRenewer too in secure mode before kicking recovery as - // tokens will start getting added for renewal as part of the recovery - // process itself. - if (UserGroupInformation.isSecurityEnabled()) { - this.delegationTokenRenewer.start(); - } - RMStateStore rmStore = rmContext.getStateStore(); // The state store needs to start irrespective of recoveryEnabled as apps // need events to move to further states. 
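Taken together, the RMContext and ResourceManager hunks above boil down to the following startup wiring. This is a condensed, illustrative sketch rather than the verbatim ResourceManager code: class, method, and field names are taken from the diffs in this patch, while the surrounding CompositeService plumbing, security checks, and error handling are omitted.

import java.net.InetSocketAddress;

import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMDelegationTokenSecretManager;

// Hypothetical helper class used only for illustration; it is not part of the patch.
public class TokenRenewalWiringSketch {

  // Roughly what ResourceManager.serviceInit() now does: publish the RM delegation
  // token secret manager and the ClientRMService through the shared RMContext, and
  // hand the context to the DelegationTokenRenewer (which is added as a service only
  // when security is enabled).
  void wireDuringServiceInit(RMContext rmContext,
      RMDelegationTokenSecretManager rmDTSecretManager,
      ClientRMService clientRM, DelegationTokenRenewer renewer) {
    rmContext.setRMDelegationTokenSecretManager(rmDTSecretManager);
    rmContext.setClientRMService(clientRM);
    renewer.setRMContext(rmContext);
  }

  // Roughly what DelegationTokenRenewer.serviceStart() can now do, because the
  // renewer starts only after ClientRMService has started and bound its address:
  // short-circuit RM delegation token renewal to the RM's own secret manager.
  void shortCircuitDuringServiceStart(RMContext rmContext) {
    InetSocketAddress rmAddr = rmContext.getClientRMService().getBindAddress();
    RMDelegationTokenIdentifier.Renewer.setSecretManager(
        rmContext.getRMDelegationTokenSecretManager(), rmAddr);
  }
}

The same ordering is what allows tokens submitted before serviceStart() to be parked in pendingTokenForRenewal and renewed once the service is up, as the DelegationTokenRenewer diff below shows.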
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index bad5eabd407..a58b9175f8e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -34,6 +34,7 @@ import java.util.TimerTask; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ConcurrentMap; import java.util.concurrent.LinkedBlockingQueue; +import java.util.concurrent.locks.ReentrantReadWriteLock; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -47,6 +48,8 @@ import org.apache.hadoop.service.AbstractService; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import com.google.common.annotations.VisibleForTesting; @@ -64,6 +67,7 @@ public class DelegationTokenRenewer extends AbstractService { // global single timer (daemon) private Timer renewalTimer; + private RMContext rmContext; // delegation token canceler thread private DelegationTokenCancelThread dtCancelThread = @@ -80,6 +84,9 @@ public class DelegationTokenRenewer extends AbstractService { private long tokenRemovalDelayMs; private Thread delayedRemovalThread; + private boolean isServiceStarted = false; + private List pendingTokenForRenewal = + new ArrayList(); private boolean tokenKeepAliveEnabled; @@ -100,7 +107,6 @@ public class DelegationTokenRenewer extends AbstractService { @Override protected void serviceStart() throws Exception { - dtCancelThread.start(); renewalTimer = new Timer(true); if (tokenKeepAliveEnabled) { @@ -109,6 +115,15 @@ public class DelegationTokenRenewer extends AbstractService { "DelayedTokenCanceller"); delayedRemovalThread.start(); } + // enable RM to short-circuit token operations directly to itself + RMDelegationTokenIdentifier.Renewer.setSecretManager( + rmContext.getRMDelegationTokenSecretManager(), + rmContext.getClientRMService().getBindAddress()); + // Delegation token renewal is delayed until ClientRMService starts. As + // it is required to short circuit the token renewal calls. 
+ isServiceStarted = true; + renewIfServiceIsStarted(pendingTokenForRenewal); + pendingTokenForRenewal.clear(); super.serviceStart(); } @@ -275,8 +290,8 @@ public class DelegationTokenRenewer extends AbstractService { * @throws IOException */ public void addApplication( - ApplicationId applicationId, Credentials ts, boolean shouldCancelAtEnd) - throws IOException { + ApplicationId applicationId, Credentials ts, boolean shouldCancelAtEnd) + throws IOException { if (ts == null) { return; //nothing to add } @@ -291,25 +306,40 @@ public class DelegationTokenRenewer extends AbstractService { // find tokens for renewal, but don't add timers until we know // all renewable tokens are valid - Set dtrs = new HashSet(); + // At RM restart it is safe to assume that all the previously added tokens + // are valid + List tokenList = + new ArrayList(); for(Token token : tokens) { - // first renew happens immediately if (token.isManaged()) { - DelegationTokenToRenew dtr = - new DelegationTokenToRenew(applicationId, token, getConfig(), now, - shouldCancelAtEnd); - renewToken(dtr); - dtrs.add(dtr); + tokenList.add(new DelegationTokenToRenew(applicationId, + token, getConfig(), now, shouldCancelAtEnd)); } } - for (DelegationTokenToRenew dtr : dtrs) { - addTokenToList(dtr); - setTimerForTokenRenewal(dtr); - if (LOG.isDebugEnabled()) { - LOG.debug("Registering token for renewal for:" + - " service = " + dtr.token.getService() + - " for appId = " + applicationId); + if (!tokenList.isEmpty()){ + renewIfServiceIsStarted(tokenList); + } + } + + protected void renewIfServiceIsStarted(List dtrs) + throws IOException { + if (isServiceStarted) { + // Renewing token and adding it to timer calls are separated purposefully + // If user provides incorrect token then it should not be added for + // renewal. 
+ for (DelegationTokenToRenew dtr : dtrs) { + renewToken(dtr); } + for (DelegationTokenToRenew dtr : dtrs) { + addTokenToList(dtr); + setTimerForTokenRenewal(dtr); + if (LOG.isDebugEnabled()) { + LOG.debug("Registering token for renewal for:" + " service = " + + dtr.token.getService() + " for appId = " + dtr.applicationId); + } + } + } else { + pendingTokenForRenewal.addAll(dtrs); } } @@ -513,4 +543,7 @@ public class DelegationTokenRenewer extends AbstractService { } } + public void setRMContext(RMContext rmContext) { + this.rmContext = rmContext; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index c0f480bd1c4..fbf53267dff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -19,7 +19,6 @@ package org.apache.hadoop.yarn.server.resourcemanager; import java.io.IOException; -import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.UnknownHostException; import java.nio.ByteBuffer; @@ -35,6 +34,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.SaslRpcServer.AuthMethod; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; @@ -90,7 +90,7 @@ public class TestRMRestart { conf.set(YarnConfiguration.RECOVERY_ENABLED, "true"); conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName()); - rmAddr = new InetSocketAddress(InetAddress.getLocalHost(), 123); + rmAddr = new InetSocketAddress("localhost", 8032); } @Test (timeout=180000) @@ -592,7 +592,12 @@ public class TestRMRestart { @Test public void testRMDelegationTokenRestoredOnRMRestart() throws Exception { conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2); - + + conf.set( + CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032"); + UserGroupInformation.setConfiguration(conf); MemoryRMStateStore memStore = new MemoryRMStateStore(); memStore.init(conf); RMState rmState = memStore.getState(); @@ -614,6 +619,8 @@ public class TestRMRestart { // request a token and add into credential GetDelegationTokenRequest request1 = GetDelegationTokenRequest.newInstance("renewer1"); + UserGroupInformation.getCurrentUser().setAuthenticationMethod( + AuthMethod.KERBEROS); GetDelegationTokenResponse response1 = rm1.getClientRMService().getDelegationToken(request1); org.apache.hadoop.yarn.api.records.Token delegationToken1 = @@ -644,7 +651,7 @@ public class TestRMRestart { rm1.getRMDTSecretManager().getAllTokens(); Assert.assertEquals(tokenIdentSet, allTokensRM1.keySet()); Assert.assertEquals(allTokensRM1, rmDTState); - + // assert sequence number is saved Assert.assertEquals( rm1.getRMDTSecretManager().getLatestDTSequenceNumber(), @@ -682,7 +689,7 @@ public class TestRMRestart { // assert 
master keys and tokens are populated back to DTSecretManager Map allTokensRM2 = rm2.getRMDTSecretManager().getAllTokens(); - Assert.assertEquals(allTokensRM1, allTokensRM2); + Assert.assertEquals(allTokensRM2.keySet(), allTokensRM1.keySet()); // rm2 has its own master keys when it starts, we use containsAll here Assert.assertTrue(rm2.getRMDTSecretManager().getAllMasterKeys() .containsAll(allKeysRM1)); @@ -735,15 +742,24 @@ public class TestRMRestart { } @Override - protected void doSecureLogin() throws IOException { - // Do nothing. + protected ClientRMService createClientRMService() { + return new ClientRMService(getRMContext(), getResourceScheduler(), + rmAppManager, applicationACLsManager, rmDTSecretManager){ + @Override + protected void serviceStart() throws Exception { + // do nothing + } + + @Override + protected void serviceStop() throws Exception { + //do nothing + } + }; } @Override - protected void serviceInit(Configuration conf) throws Exception { - super.serviceInit(conf); - RMDelegationTokenIdentifier.Renewer.setSecretManager( - this.getRMDTSecretManager(), rmAddr); + protected void doSecureLogin() throws IOException { + // Do nothing. } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 38580d5dab5..98e6ab0f1b7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -25,8 +25,10 @@ import static org.mockito.Matchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; +import java.net.InetSocketAddress; import java.net.URI; import java.net.URISyntaxException; import java.util.Collections; @@ -48,9 +50,12 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenRenewer; import org.apache.hadoop.security.token.delegation.DelegationKey; +import org.apache.hadoop.service.Service.STATE; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.resourcemanager.ClientRMService; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.junit.After; import org.junit.Before; @@ -141,6 +146,13 @@ public class TestDelegationTokenRenewer { Renewer.reset(); delegationTokenRenewer = new DelegationTokenRenewer(); delegationTokenRenewer.init(conf); + RMContext mockContext = mock(RMContext.class); + ClientRMService mockClientRMService = mock(ClientRMService.class); + when(mockContext.getClientRMService()).thenReturn(mockClientRMService); + InetSocketAddress sockAddr = + InetSocketAddress.createUnresolved("localhost", 1234); + 
when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); + delegationTokenRenewer.setRMContext(mockContext); delegationTokenRenewer.start(); } @@ -454,6 +466,13 @@ public class TestDelegationTokenRenewer { YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, 1000l); localDtr.init(lconf); + RMContext mockContext = mock(RMContext.class); + ClientRMService mockClientRMService = mock(ClientRMService.class); + when(mockContext.getClientRMService()).thenReturn(mockClientRMService); + InetSocketAddress sockAddr = + InetSocketAddress.createUnresolved("localhost", 1234); + when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); + localDtr.setRMContext(mockContext); localDtr.start(); MyFS dfs = (MyFS)FileSystem.get(lconf); @@ -511,6 +530,13 @@ public class TestDelegationTokenRenewer { YarnConfiguration.RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS, 1000l); localDtr.init(lconf); + RMContext mockContext = mock(RMContext.class); + ClientRMService mockClientRMService = mock(ClientRMService.class); + when(mockContext.getClientRMService()).thenReturn(mockClientRMService); + InetSocketAddress sockAddr = + InetSocketAddress.createUnresolved("localhost", 1234); + when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); + localDtr.setRMContext(mockContext); localDtr.start(); MyFS dfs = (MyFS)FileSystem.get(lconf); @@ -550,7 +576,7 @@ public class TestDelegationTokenRenewer { } catch (InvalidToken ite) {} } - @Test(timeout=2000) + @Test(timeout=20000) public void testConncurrentAddApplication() throws IOException, InterruptedException, BrokenBarrierException { final CyclicBarrier startBarrier = new CyclicBarrier(2); @@ -579,6 +605,13 @@ public class TestDelegationTokenRenewer { // fire up the renewer final DelegationTokenRenewer dtr = new DelegationTokenRenewer(); dtr.init(conf); + RMContext mockContext = mock(RMContext.class); + ClientRMService mockClientRMService = mock(ClientRMService.class); + when(mockContext.getClientRMService()).thenReturn(mockClientRMService); + InetSocketAddress sockAddr = + InetSocketAddress.createUnresolved("localhost", 1234); + when(mockClientRMService.getBindAddress()).thenReturn(sockAddr); + dtr.setRMContext(mockContext); dtr.start(); // submit a job that blocks during renewal From 03ae82a311c033318496111d3dbd77bbaaa990b6 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Sat, 7 Sep 2013 07:28:19 +0000 Subject: [PATCH 133/153] YARN-696. Changed RMWebservice apps call to take in multiple application states. Contributed by Trevor Lorimer. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520736 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../resourcemanager/webapp/RMWebServices.java | 42 ++++- .../webapp/TestRMWebServicesApps.java | 167 +++++++++++++++++- .../src/site/apt/ResourceManagerRest.apt.vm | 5 +- 4 files changed, 203 insertions(+), 14 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 9501969ebf5..25607864512 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -89,6 +89,9 @@ Release 2.1.1-beta - UNRELEASED YARN-758. Augment MockNM to use multiple cores (Karthik Kambatla via Sandy Ryza) + YARN-696. Changed RMWebservice apps call to take in multiple application + states. 
(Trevor Lorimer via vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index a9b1523eeae..a5c2f441e05 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.webapp; import java.io.IOException; +import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; import java.util.HashSet; @@ -231,6 +232,7 @@ public class RMWebServices { @Produces({ MediaType.APPLICATION_JSON, MediaType.APPLICATION_XML }) public AppsInfo getApps(@Context HttpServletRequest hsr, @QueryParam("state") String stateQuery, + @QueryParam("states") Set statesQuery, @QueryParam("finalStatus") String finalStatusQuery, @QueryParam("user") String userQuery, @QueryParam("queue") String queueQuery, @@ -245,6 +247,7 @@ public class RMWebServices { boolean checkStart = false; boolean checkEnd = false; boolean checkAppTypes = false; + boolean checkAppStates = false; long countNum = 0; // set values suitable in case both of begin/end not specified @@ -321,6 +324,36 @@ public class RMWebServices { checkAppTypes = true; } + String allAppStates; + RMAppState[] stateArray = RMAppState.values(); + allAppStates = Arrays.toString(stateArray); + + Set appStates = new HashSet(); + // stateQuery is deprecated. + if (stateQuery != null && !stateQuery.isEmpty()) { + statesQuery.add(stateQuery); + } + if (!statesQuery.isEmpty()) { + for (String applicationState : statesQuery) { + if (applicationState != null && !applicationState.isEmpty()) { + String[] states = applicationState.split(","); + for (String state : states) { + try { + RMAppState.valueOf(state.trim()); + } catch (IllegalArgumentException iae) { + throw new BadRequestException( + "Invalid application-state " + state + + " specified. 
It should be one of " + allAppStates); + } + appStates.add(state.trim().toLowerCase()); + } + } + } + } + if (!appStates.isEmpty()) { + checkAppStates = true; + } + final ConcurrentMap apps = rm.getRMContext() .getRMApps(); AppsInfo allApps = new AppsInfo(); @@ -329,11 +362,10 @@ public class RMWebServices { if (checkCount && num == countNum) { break; } - if (stateQuery != null && !stateQuery.isEmpty()) { - RMAppState.valueOf(stateQuery); - if (!rmapp.getState().toString().equalsIgnoreCase(stateQuery)) { - continue; - } + + if (checkAppStates + && !appStates.contains(rmapp.getState().toString().toLowerCase())) { + continue; } if (finalStatusQuery != null && !finalStatusQuery.isEmpty()) { FinalApplicationStatus.valueOf(finalStatusQuery); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java index 52f72d8692e..5e7145283ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java @@ -68,6 +68,7 @@ import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.ClientResponse.Status; import com.sun.jersey.api.client.UniformInterfaceException; import com.sun.jersey.api.client.WebResource; +import com.sun.jersey.core.util.MultivaluedMapImpl; import com.sun.jersey.guice.spi.container.servlet.GuiceContainer; import com.sun.jersey.test.framework.JerseyTest; import com.sun.jersey.test.framework.WebAppDescriptor; @@ -239,6 +240,122 @@ public class TestRMWebServicesApps extends JerseyTest { rm.stop(); } + @Test + public void testAppsQueryStates() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048); + rm.submitApp(1024); + RMApp killedApp = rm.submitApp(1024); + rm.killApp(killedApp.getApplicationId()); + + amNodeManager.nodeHeartbeat(true); + + WebResource r = resource(); + MultivaluedMapImpl params = new MultivaluedMapImpl(); + params.add("states", RMAppState.ACCEPTED.toString()); + ClientResponse response = r.path("ws").path("v1").path("cluster") + .path("apps").queryParams(params) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("incorrect number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("incorrect number of elements", 1, array.length()); + assertEquals("state not equal to ACCEPTED", "ACCEPTED", array + .getJSONObject(0).getString("state")); + + r = resource(); + params = new MultivaluedMapImpl(); + params.add("states", RMAppState.ACCEPTED.toString()); + params.add("states", RMAppState.KILLED.toString()); + response = r.path("ws").path("v1").path("cluster") + .path("apps").queryParams(params) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + 
assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + apps = json.getJSONObject("apps"); + assertEquals("incorrect number of elements", 1, apps.length()); + array = apps.getJSONArray("app"); + assertEquals("incorrect number of elements", 2, array.length()); + assertTrue("both app states of ACCEPTED and KILLED are not present", + (array.getJSONObject(0).getString("state").equals("ACCEPTED") && + array.getJSONObject(1).getString("state").equals("KILLED")) || + (array.getJSONObject(0).getString("state").equals("KILLED") && + array.getJSONObject(1).getString("state").equals("ACCEPTED"))); + + rm.stop(); + } + + @Test + public void testAppsQueryStatesComma() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048); + rm.submitApp(1024); + RMApp killedApp = rm.submitApp(1024); + rm.killApp(killedApp.getApplicationId()); + + amNodeManager.nodeHeartbeat(true); + + WebResource r = resource(); + MultivaluedMapImpl params = new MultivaluedMapImpl(); + params.add("states", RMAppState.ACCEPTED.toString()); + ClientResponse response = r.path("ws").path("v1").path("cluster") + .path("apps").queryParams(params) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + JSONObject apps = json.getJSONObject("apps"); + assertEquals("incorrect number of elements", 1, apps.length()); + JSONArray array = apps.getJSONArray("app"); + assertEquals("incorrect number of elements", 1, array.length()); + assertEquals("state not equal to ACCEPTED", "ACCEPTED", array + .getJSONObject(0).getString("state")); + + r = resource(); + params = new MultivaluedMapImpl(); + params.add("states", RMAppState.ACCEPTED.toString() + "," + + RMAppState.KILLED.toString()); + response = r.path("ws").path("v1").path("cluster") + .path("apps").queryParams(params) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of elements", 1, json.length()); + apps = json.getJSONObject("apps"); + assertEquals("incorrect number of elements", 1, apps.length()); + array = apps.getJSONArray("app"); + assertEquals("incorrect number of elements", 2, array.length()); + assertTrue("both app states of ACCEPTED and KILLED are not present", + (array.getJSONObject(0).getString("state").equals("ACCEPTED") && + array.getJSONObject(1).getString("state").equals("KILLED")) || + (array.getJSONObject(0).getString("state").equals("KILLED") && + array.getJSONObject(1).getString("state").equals("ACCEPTED"))); + + rm.stop(); + } + + @Test + public void testAppsQueryStatesNone() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048); + rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + + ClientResponse response = r.path("ws").path("v1").path("cluster") + .path("apps").queryParam("states", RMAppState.RUNNING.toString()) + .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject json = response.getEntity(JSONObject.class); + assertEquals("incorrect number of 
elements", 1, json.length()); + assertEquals("apps is not null", JSONObject.NULL, json.get("apps")); + rm.stop(); + } + @Test public void testAppsQueryStateNone() throws JSONException, Exception { rm.start(); @@ -257,6 +374,43 @@ public class TestRMWebServicesApps extends JerseyTest { rm.stop(); } + @Test + public void testAppsQueryStatesInvalid() throws JSONException, Exception { + rm.start(); + MockNM amNodeManager = rm.registerNode("127.0.0.1:1234", 2048); + rm.submitApp(1024); + amNodeManager.nodeHeartbeat(true); + WebResource r = resource(); + + try { + r.path("ws").path("v1").path("cluster").path("apps") + .queryParam("states", "INVALID_test") + .accept(MediaType.APPLICATION_JSON).get(JSONObject.class); + fail("should have thrown exception on invalid state query"); + } catch (UniformInterfaceException ue) { + ClientResponse response = ue.getResponse(); + assertEquals(Status.BAD_REQUEST, response.getClientResponseStatus()); + assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType()); + JSONObject msg = response.getEntity(JSONObject.class); + JSONObject exception = msg.getJSONObject("RemoteException"); + assertEquals("incorrect number of elements", 3, exception.length()); + String message = exception.getString("message"); + String type = exception.getString("exception"); + String classname = exception.getString("javaClassName"); + WebServicesTestUtils.checkStringContains( + "exception message", + "Invalid application-state INVALID_test", + message); + WebServicesTestUtils.checkStringMatch("exception type", + "BadRequestException", type); + WebServicesTestUtils.checkStringMatch("exception classname", + "org.apache.hadoop.yarn.webapp.BadRequestException", classname); + + } finally { + rm.stop(); + } + } + @Test public void testAppsQueryStateInvalid() throws JSONException, Exception { rm.start(); @@ -280,15 +434,14 @@ public class TestRMWebServicesApps extends JerseyTest { String message = exception.getString("message"); String type = exception.getString("exception"); String classname = exception.getString("javaClassName"); - WebServicesTestUtils - .checkStringContains( - "exception message", - "org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState.INVALID_test", - message); + WebServicesTestUtils.checkStringContains( + "exception message", + "Invalid application-state INVALID_test", + message); WebServicesTestUtils.checkStringMatch("exception type", - "IllegalArgumentException", type); + "BadRequestException", type); WebServicesTestUtils.checkStringMatch("exception classname", - "java.lang.IllegalArgumentException", classname); + "org.apache.hadoop.yarn.webapp.BadRequestException", classname); } finally { rm.stop(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm index 5f0580128df..22011124f27 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm @@ -1107,10 +1107,11 @@ ResourceManager REST API's. ** Query Parameters Supported - Multiple paramters can be specified. The started and finished times have a begin and end parameter to allow you to specify ranges. For example, one could request all applications that started between 1:00am and 2:00pm on 12/19/2011 with startedTimeBegin=1324256400&startedTimeEnd=1324303200. 
If the Begin parameter is not specfied, it defaults to 0, and if the End parameter is not specified, it defaults to infinity. + Multiple parameters can be specified. The started and finished times have a begin and end parameter to allow you to specify ranges. For example, one could request all applications that started between 1:00am and 2:00pm on 12/19/2011 with startedTimeBegin=1324256400&startedTimeEnd=1324303200. If the Begin parameter is not specified, it defaults to 0, and if the End parameter is not specified, it defaults to infinity. ------ - * state - state of the application + * state [deprecated] - state of the application + * states - applications matching the given application states, specified as a comma-separated list. * finalStatus - the final status of the application - reported by the application itself * user - user name * queue - queue name From 40cf0068d8aae2c50c90e0661aaa1170a4c03e42 Mon Sep 17 00:00:00 2001 From: Ivan Mitic Date: Sun, 8 Sep 2013 19:51:46 +0000 Subject: [PATCH 134/153] HADOOP-9924. Addendum patch to address issue with an empty classpath entry on Windows. Contributed by Shanyu Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520903 13f79535-47bb-0310-9956-ffa450edef68 --- .../src/main/java/org/apache/hadoop/fs/FileUtil.java | 3 +++ .../src/test/java/org/apache/hadoop/fs/TestFileUtil.java | 5 ++++- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index bb203422f39..7bb20dd4032 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -1239,6 +1239,9 @@ public class FileUtil { List classPathEntryList = new ArrayList( classPathEntries.length); for (String classPathEntry: classPathEntries) { + if (classPathEntry.length() == 0) { + continue; + } if (classPathEntry.endsWith("*")) { // Append all jars that match the wildcard Path globPath = new Path(classPathEntry).suffix("{.jar,.JAR}"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java index 3877e83a9b2..f37bf4f0c04 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java @@ -757,7 +757,7 @@ public class TestFileUtil { String wildcardPath = tmp.getCanonicalPath() + File.separator + "*"; String nonExistentSubdir = tmp.getCanonicalPath() + Path.SEPARATOR + "subdir" + Path.SEPARATOR; - List classPaths = Arrays.asList("cp1.jar", "cp2.jar", wildcardPath, + List classPaths = Arrays.asList("", "cp1.jar", "cp2.jar", wildcardPath, "cp3.jar", nonExistentSubdir); String inputClassPath = StringUtils.join(File.pathSeparator, classPaths); String classPathJar = FileUtil.createJarWithClassPath(inputClassPath, @@ -776,6 +776,9 @@ public class TestFileUtil { Assert.assertNotNull(classPathAttr); List expectedClassPaths = new ArrayList(); for (String classPath: classPaths) { + if (classPath.length() == 0) { + continue; + } if (wildcardPath.equals(classPath)) { // add wildcard matches for (File wildcardMatch: wildcardMatches) { From 4f2bf68b73df98f1bb5f80e6e192bd03b935b03b Mon Sep 17 00:00:00 2001 From: Andrew Wang 
Date: Mon, 9 Sep 2013 04:54:57 +0000 Subject: [PATCH 135/153] HDFS-5170. BlockPlacementPolicyDefault uses the wrong classname when alerting to enable debug logging. (Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520961 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 +++ .../server/blockmanagement/BlockPlacementPolicyDefault.java | 2 +- 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 3dbcde5d619..735dd2634e4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -289,6 +289,9 @@ Release 2.3.0 - UNRELEASED HDFS-5164. deleteSnapshot should check if OperationCategory.WRITE is possible before taking write lock. (Colin Patrick McCabe) + HDFS-5170. BlockPlacementPolicyDefault uses the wrong classname when + alerting to enable debug logging. (Andrew Wang) + Release 2.1.1-beta - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java index af5aab420a3..fbb922351bf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java @@ -55,7 +55,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy { private static final String enableDebugLogging = "For more information, please enable DEBUG log level on " - + LOG.getClass().getName(); + + BlockPlacementPolicy.class.getName(); protected boolean considerLoad; private boolean preferLocalNode = true; From b2af6c70245703aa6d2ad1beceacc2aa2fcde5c0 Mon Sep 17 00:00:00 2001 From: Devarajulu K Date: Mon, 9 Sep 2013 06:42:42 +0000 Subject: [PATCH 136/153] MAPREDUCE-5414. TestTaskAttempt fails in JDK7 with NPE. Contributed by Nemon Lou. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1520964 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ .../mapreduce/v2/app/job/impl/TestTaskAttempt.java | 12 ++++++------ 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index e3e0f25e83b..13f0079fc3c 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -253,6 +253,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5475. MRClientService does not verify ACLs properly (jlowe) + MAPREDUCE-5414. 
TestTaskAttempt fails in JDK7 with NPE (Nemon Lou via + devaraj) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 885e01106d5..1129c2fcfc4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -343,7 +343,7 @@ public class TestTaskAttempt{ TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, - mock(Token.class), new Credentials(), + new Token(), new Credentials(), new SystemClock(), null); NodeId nid = NodeId.newInstance("127.0.0.1", 0); @@ -399,7 +399,7 @@ public class TestTaskAttempt{ TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, - mock(Token.class), new Credentials(), + new Token(), new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.2", 0); @@ -456,7 +456,7 @@ public class TestTaskAttempt{ TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, - mock(Token.class), new Credentials(), + new Token(), new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); @@ -516,7 +516,7 @@ public class TestTaskAttempt{ TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, - mock(Token.class), new Credentials(), + new Token(), new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); @@ -582,7 +582,7 @@ public class TestTaskAttempt{ TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, - mock(Token.class), new Credentials(), new SystemClock(), appCtx); + new Token(), new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); ContainerId contId = ContainerId.newInstance(appAttemptId, 3); @@ -631,7 +631,7 @@ public class TestTaskAttempt{ TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, - mock(Token.class), new Credentials(), new SystemClock(), appCtx); + new Token(), new Credentials(), new SystemClock(), appCtx); NodeId nid = NodeId.newInstance("127.0.0.1", 0); ContainerId contId = ContainerId.newInstance(appAttemptId, 3); From 1cd7b067f7aebda201541e309ba27fc28e0b16db Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Mon, 9 Sep 2013 11:07:49 +0000 Subject: [PATCH 137/153] YARN-1049. ContainerExistStatus should define a status for preempted containers. 
(tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521036 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../yarn/api/records/ContainerExitStatus.java | 5 +++ .../scheduler/SchedulerUtils.java | 39 ++++++++++++++++--- .../scheduler/capacity/CapacityScheduler.java | 2 +- .../scheduler/fair/FairScheduler.java | 2 +- .../scheduler/TestSchedulerUtils.java | 20 ++++++++++ 6 files changed, 64 insertions(+), 7 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 25607864512..c20d715a3e4 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -162,6 +162,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1107. Fixed a bug in ResourceManager because of which RM in secure mode fails to restart. (Omkar Vinit Joshi via vinodkv) + YARN-1049. ContainerExistStatus should define a status for preempted + containers. (tucu) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java index 76cb6c19581..71225787aeb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerExitStatus.java @@ -41,4 +41,9 @@ public class ContainerExitStatus { * threshold number of the nodemanager-log-directories become bad. */ public static final int DISKS_FAILED = -101; + + /** + * Containers preempted by the framework. + */ + public static final int PREEMPTED = -102; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index b0cb25e5c75..ef71dccf510 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -63,7 +63,37 @@ public class SchedulerUtils { public static final String UNRESERVED_CONTAINER = "Container reservation no longer required."; - + + /** + * Utility to create a {@link ContainerStatus} during exceptional + * circumstances. + * + * @param containerId {@link ContainerId} of returned/released/lost container. + * @param diagnostics diagnostic message + * @return ContainerStatus for an returned/released/lost + * container + */ + public static ContainerStatus createAbnormalContainerStatus( + ContainerId containerId, String diagnostics) { + return createAbnormalContainerStatus(containerId, + ContainerExitStatus.ABORTED, diagnostics); + } + + /** + * Utility to create a {@link ContainerStatus} during exceptional + * circumstances. + * + * @param containerId {@link ContainerId} of returned/released/lost container. 
+ * @param diagnostics diagnostic message + * @return ContainerStatus for an returned/released/lost + * container + */ + public static ContainerStatus createPreemptedContainerStatus( + ContainerId containerId, String diagnostics) { + return createAbnormalContainerStatus(containerId, + ContainerExitStatus.PREEMPTED, diagnostics); + } + /** * Utility to create a {@link ContainerStatus} during exceptional * circumstances. @@ -73,14 +103,13 @@ public class SchedulerUtils { * @return ContainerStatus for an returned/released/lost * container */ - public static ContainerStatus createAbnormalContainerStatus( - ContainerId containerId, String diagnostics) { + private static ContainerStatus createAbnormalContainerStatus( + ContainerId containerId, int exitStatus, String diagnostics) { ContainerStatus containerStatus = recordFactory.newRecordInstance(ContainerStatus.class); containerStatus.setContainerId(containerId); containerStatus.setDiagnostics(diagnostics); - containerStatus.setExitStatus( - ContainerExitStatus.ABORTED); + containerStatus.setExitStatus(exitStatus); containerStatus.setState(ContainerState.COMPLETE); return containerStatus; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 2efb9ad6719..bbf7f5cf699 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -904,7 +904,7 @@ public class CapacityScheduler LOG.debug("KILL_CONTAINER: container" + cont.toString()); } completedContainer(cont, - SchedulerUtils.createAbnormalContainerStatus( + SchedulerUtils.createPreemptedContainerStatus( cont.getContainerId(),"Container being forcibly preempted:" + cont.getContainerId()), RMContainerEventType.KILL); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 7f315781ef9..72000e91fc3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -444,7 +444,7 @@ public class FairScheduler implements ResourceScheduler { // proceed with kill if (time + waitTimeBeforeKill < clock.getTime()) { ContainerStatus status = - SchedulerUtils.createAbnormalContainerStatus( + SchedulerUtils.createPreemptedContainerStatus( container.getContainerId(), SchedulerUtils.PREEMPTED_CONTAINER); // TODO: Not sure if this ever actually adds this to the list of cleanup diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index 9661057d133..9969db5a5e3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -41,6 +41,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerExitStatus; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceBlacklistRequest; @@ -358,4 +362,20 @@ public class TestSchedulerUtils { Priority low = Priority.newInstance(2); assertTrue(high.compareTo(low) > 0); } + + @Test + public void testCreateAbnormalContainerStatus() { + ContainerStatus cd = SchedulerUtils.createAbnormalContainerStatus( + ContainerId.newInstance(ApplicationAttemptId.newInstance( + ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x"); + Assert.assertEquals(ContainerExitStatus.ABORTED, cd.getExitStatus()); + } + + @Test + public void testCreatePreemptedContainerStatus() { + ContainerStatus cd = SchedulerUtils.createPreemptedContainerStatus( + ContainerId.newInstance(ApplicationAttemptId.newInstance( + ApplicationId.newInstance(System.currentTimeMillis(), 1), 1), 1), "x"); + Assert.assertEquals(ContainerExitStatus.PREEMPTED, cd.getExitStatus()); + } } From f2e0a125f44e2a529ac2ff74feb655741fed56ba Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Mon, 9 Sep 2013 11:12:45 +0000 Subject: [PATCH 138/153] YARN-1144. Unmanaged AMs registering a tracking URI should not be proxy-fied. (tucu) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521039 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 + .../rmapp/attempt/RMAppAttemptImpl.java | 5 +- .../rmapp/attempt/TestRMAppAttemptImpl.java | 77 +++++++++++++++++++ .../attempt/TestRMAppAttemptTransitions.java | 56 ++++++++------ 4 files changed, 116 insertions(+), 25 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImpl.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index c20d715a3e4..17e53641ff0 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -165,6 +165,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1049. ContainerExistStatus should define a status for preempted containers. (tucu) + YARN-1144. Unmanaged AMs registering a tracking URI should not be + proxy-fied. 
(tucu) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 00397cfa650..51e6dc9f10d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -440,7 +440,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { public String getTrackingUrl() { this.readLock.lock(); try { - return this.proxiedTrackingUrl; + return (getSubmissionContext().getUnmanagedAM()) ? + this.origTrackingUrl : this.proxiedTrackingUrl; } finally { this.readLock.unlock(); } @@ -961,7 +962,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { } } - private static final class AMRegisteredTransition extends BaseTransition { + static final class AMRegisteredTransition extends BaseTransition { @Override public void transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImpl.java new file mode 100644 index 00000000000..e69d867e0f4 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImpl.java @@ -0,0 +1,77 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt; + +import junit.framework.Assert; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent; + +import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; +import org.junit.Test; +import org.mockito.Mockito; + +public class TestRMAppAttemptImpl { + + private void testTrackingUrl(String url, boolean unmanaged) { + ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance + (ApplicationId.newInstance(1, 2), 1); + EventHandler handler = Mockito.mock(EventHandler.class); + Dispatcher dispatcher = Mockito.mock(Dispatcher.class); + Mockito.when(dispatcher.getEventHandler()).thenReturn(handler); + RMContext rmContext = Mockito.mock(RMContext.class); + Mockito.when(rmContext.getDispatcher()).thenReturn(dispatcher); + + ApplicationSubmissionContext appContext = + Mockito.mock(ApplicationSubmissionContext.class); + Mockito.when(appContext.getUnmanagedAM()).thenReturn(unmanaged); + + RMAppAttemptImpl attempt = new RMAppAttemptImpl(attemptId, rmContext, null, + null, appContext, new YarnConfiguration(), null); + RMAppAttemptRegistrationEvent event = + Mockito.mock(RMAppAttemptRegistrationEvent.class); + Mockito.when(event.getHost()).thenReturn("h"); + Mockito.when(event.getRpcport()).thenReturn(0); + Mockito.when(event.getTrackingurl()).thenReturn(url); + new RMAppAttemptImpl.AMRegisteredTransition().transition(attempt, event); + if (unmanaged) { + Assert.assertEquals(url, attempt.getTrackingUrl()); + } else { + Assert.assertNotSame(url, attempt.getTrackingUrl()); + Assert.assertTrue(attempt.getTrackingUrl().contains( + ProxyUriUtils.PROXY_SERVLET_NAME)); + Assert.assertTrue(attempt.getTrackingUrl().contains( + attemptId.getApplicationId().toString())); + } + } + + @Test + public void testTrackingUrlUnmanagedAM() { + testTrackingUrl("http://foo:8000/x", true); + } + + @Test + public void testTrackingUrlManagedAM() { + testTrackingUrl("bar:8000/x", false); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index 5261d077d5c..2290284eaa8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -408,16 +408,19 @@ public class TestRMAppAttemptTransitions { * {@link RMAppAttemptState#RUNNING} */ private void testAppAttemptRunningState(Container container, - String host, int rpcPort, String trackingUrl) { + String host, int rpcPort, String trackingUrl, boolean unmanagedAM) { 
assertEquals(RMAppAttemptState.RUNNING, applicationAttempt.getAppAttemptState()); assertEquals(container, applicationAttempt.getMasterContainer()); assertEquals(host, applicationAttempt.getHost()); assertEquals(rpcPort, applicationAttempt.getRpcPort()); assertEquals(trackingUrl, applicationAttempt.getOriginalTrackingUrl()); - assertEquals(getProxyUrl(applicationAttempt), - applicationAttempt.getTrackingUrl()); - + if (unmanagedAM) { + assertEquals("oldtrackingurl", applicationAttempt.getTrackingUrl()); + } else { + assertEquals(getProxyUrl(applicationAttempt), + applicationAttempt.getTrackingUrl()); + } // TODO - need to add more checks relevant to this state } @@ -446,13 +449,18 @@ public class TestRMAppAttemptTransitions { FinalApplicationStatus finalStatus, String trackingUrl, String diagnostics, - int finishedContainerCount) { + int finishedContainerCount, boolean unmanagedAM) { assertEquals(RMAppAttemptState.FINISHED, applicationAttempt.getAppAttemptState()); assertEquals(diagnostics, applicationAttempt.getDiagnostics()); assertEquals(trackingUrl, applicationAttempt.getOriginalTrackingUrl()); - assertEquals(getProxyUrl(applicationAttempt), - applicationAttempt.getTrackingUrl()); + if (unmanagedAM) { + assertEquals("mytrackingurl", applicationAttempt.getTrackingUrl()); + + } else { + assertEquals(getProxyUrl(applicationAttempt), + applicationAttempt.getTrackingUrl()); + } assertEquals(finishedContainerCount, applicationAttempt .getJustFinishedContainers().size()); assertEquals(container, applicationAttempt.getMasterContainer()); @@ -535,13 +543,14 @@ public class TestRMAppAttemptTransitions { private void runApplicationAttempt(Container container, String host, int rpcPort, - String trackingUrl) { + String trackingUrl, boolean unmanagedAM) { applicationAttempt.handle( new RMAppAttemptRegistrationEvent( applicationAttempt.getAppAttemptId(), host, rpcPort, trackingUrl)); - testAppAttemptRunningState(container, host, rpcPort, trackingUrl); + testAppAttemptRunningState(container, host, rpcPort, trackingUrl, + unmanagedAM); } private void unregisterApplicationAttempt(Container container, @@ -567,7 +576,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId()); // launch AM - runApplicationAttempt(null, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(null, "host", 8042, "oldtrackingurl", true); // complete a container applicationAttempt.handle(new RMAppAttemptContainerAcquiredEvent( @@ -581,7 +590,8 @@ public class TestRMAppAttemptTransitions { applicationAttempt.handle(new RMAppAttemptUnregistrationEvent( applicationAttempt.getAppAttemptId(), trackingUrl, finalStatus, diagnostics)); - testAppAttemptFinishedState(null, finalStatus, trackingUrl, diagnostics, 1); + testAppAttemptFinishedState(null, finalStatus, trackingUrl, diagnostics, 1, + true); } @Test @@ -690,7 +700,7 @@ public class TestRMAppAttemptTransitions { public void testRunningToFailed() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); String containerDiagMsg = "some error"; int exitCode = 123; ContainerStatus cs = BuilderUtils.newContainerStatus(amContainer.getId(), @@ -713,7 +723,7 @@ public class TestRMAppAttemptTransitions { public void testRunningToKilled() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, 
"oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); applicationAttempt.handle( new RMAppAttemptEvent( applicationAttempt.getAppAttemptId(), @@ -751,7 +761,7 @@ public class TestRMAppAttemptTransitions { public void testRunningExpire() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); applicationAttempt.handle(new RMAppAttemptEvent( applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.EXPIRE)); assertEquals(RMAppAttemptState.FAILED, @@ -769,7 +779,7 @@ public class TestRMAppAttemptTransitions { public void testUnregisterToKilledFinishing() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); unregisterApplicationAttempt(amContainer, FinalApplicationStatus.KILLED, "newtrackingurl", "Killed by user"); @@ -780,14 +790,14 @@ public class TestRMAppAttemptTransitions { public void testNoTrackingUrl() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, ""); + runApplicationAttempt(amContainer, "host", 8042, "", false); } @Test public void testUnregisterToSuccessfulFinishing() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); unregisterApplicationAttempt(amContainer, FinalApplicationStatus.SUCCEEDED, "mytrackingurl", "Successful"); } @@ -796,7 +806,7 @@ public class TestRMAppAttemptTransitions { public void testFinishingKill() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); FinalApplicationStatus finalStatus = FinalApplicationStatus.FAILED; String trackingUrl = "newtrackingurl"; String diagnostics = "Job failed"; @@ -814,7 +824,7 @@ public class TestRMAppAttemptTransitions { public void testFinishingExpire() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED; String trackingUrl = "mytrackingurl"; String diagnostics = "Successful"; @@ -825,14 +835,14 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId(), RMAppAttemptEventType.EXPIRE)); testAppAttemptFinishedState(amContainer, finalStatus, trackingUrl, - diagnostics, 0); + diagnostics, 0, false); } @Test public void testFinishingToFinishing() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED; String trackingUrl = "mytrackingurl"; String diagnostics = "Successful"; @@ -854,7 +864,7 @@ public class 
TestRMAppAttemptTransitions { public void testSuccessfulFinishingToFinished() { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl"); + runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl", false); FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED; String trackingUrl = "mytrackingurl"; String diagnostics = "Successful"; @@ -866,7 +876,7 @@ public class TestRMAppAttemptTransitions { BuilderUtils.newContainerStatus(amContainer.getId(), ContainerState.COMPLETE, "", 0))); testAppAttemptFinishedState(amContainer, finalStatus, trackingUrl, - diagnostics, 0); + diagnostics, 0, false); } private void verifyTokenCount(ApplicationAttemptId appAttemptId, int count) { From 1a649aa51a3fe291b21bd2bb45d84fcd1806c521 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 9 Sep 2013 21:19:07 +0000 Subject: [PATCH 139/153] YARN-1152. Fixed a bug in ResourceManager that was causing clients to get invalid client token key errors when an application is about to finish. Contributed by Jason Lowe. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521292 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 ++ .../resourcemanager/rmapp/RMAppImpl.java | 22 +++--- .../rmapp/attempt/RMAppAttempt.java | 8 +++ .../rmapp/attempt/RMAppAttemptImpl.java | 22 ++++++ .../rmapp/TestRMAppTransitions.java | 68 +++++++++++++++++-- .../attempt/TestRMAppAttemptTransitions.java | 31 +++++++++ 6 files changed, 138 insertions(+), 17 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 17e53641ff0..03747b3098f 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -168,6 +168,10 @@ Release 2.1.1-beta - UNRELEASED YARN-1144. Unmanaged AMs registering a tracking URI should not be proxy-fied. (tucu) + YARN-1152. Fixed a bug in ResourceManager that was causing clients to get + invalid client token key errors when an appliation is about to finish.
+ (Jason Lowe via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index c69aed3473f..cbafffe04c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -432,18 +432,18 @@ public class RMAppImpl implements RMApp, Recoverable { currentApplicationAttemptId = this.currentAttempt.getAppAttemptId(); trackingUrl = this.currentAttempt.getTrackingUrl(); origTrackingUrl = this.currentAttempt.getOriginalTrackingUrl(); - if (UserGroupInformation.isSecurityEnabled() - && clientUserName != null) { + if (UserGroupInformation.isSecurityEnabled()) { + // get a token so the client can communicate with the app attempt + // NOTE: token may be unavailable if the attempt is not running Token attemptClientToAMToken = - new Token( - new ClientToAMTokenIdentifier( - currentApplicationAttemptId, clientUserName), - rmContext.getClientToAMTokenSecretManager()); - clientToAMToken = BuilderUtils.newClientToAMToken( - attemptClientToAMToken.getIdentifier(), - attemptClientToAMToken.getKind().toString(), - attemptClientToAMToken.getPassword(), - attemptClientToAMToken.getService().toString()); + this.currentAttempt.createClientToken(clientUserName); + if (attemptClientToAMToken != null) { + clientToAMToken = BuilderUtils.newClientToAMToken( + attemptClientToAMToken.getIdentifier(), + attemptClientToAMToken.getKind().toString(), + attemptClientToAMToken.getPassword(), + attemptClientToAMToken.getService().toString()); + } } host = this.currentAttempt.getHost(); rpcPort = this.currentAttempt.getRpcPort(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java index e9f064d648e..aa44c743ccf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java @@ -34,6 +34,7 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; +import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; /** @@ -155,6 +156,13 @@ public interface RMAppAttempt extends EventHandler { */ SecretKey getClientTokenMasterKey(); + /** + * Create a token for authenticating a client connection to the app attempt + * @param clientName the name of the 
client requesting the token + * @return the token or null if the attempt is not running + */ + Token createClientToken(String clientName); + /** * Get application container and resource usage information. * @return an ApplicationResourceUsageReport object. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 51e6dc9f10d..0e1b2c8a53f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -61,6 +61,7 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.AMRMTokenIdentifier; +import org.apache.hadoop.yarn.security.client.ClientToAMTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; @@ -89,6 +90,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppRepor import org.apache.hadoop.yarn.server.resourcemanager.scheduler.YarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; import org.apache.hadoop.yarn.state.InvalidStateTransitonException; @@ -508,6 +510,26 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { return this.amrmToken; } + @Override + public Token createClientToken(String client) { + this.readLock.lock(); + + try { + Token token = null; + ClientToAMTokenSecretManagerInRM secretMgr = + this.rmContext.getClientToAMTokenSecretManager(); + if (client != null && + secretMgr.getMasterKey(this.applicationAttemptId) != null) { + token = new Token( + new ClientToAMTokenIdentifier(this.applicationAttemptId, client), + secretMgr); + } + return token; + } finally { + this.readLock.unlock(); + } + } + @Override public String getDiagnostics() { this.readLock.lock(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index d6bd3f6a0c1..2c19597de9b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -19,14 +19,20 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp; import static org.mockito.Mockito.mock; +import static org.junit.Assume.assumeTrue; import java.io.IOException; +import java.util.Arrays; +import java.util.Collection; import junit.framework.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.SecurityUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; @@ -57,11 +63,16 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSe import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +@RunWith(value = Parameterized.class) public class TestRMAppTransitions { static final Log LOG = LogFactory.getLog(TestRMAppTransitions.class); + private boolean isSecurityEnabled; + private Configuration conf; private RMContext rmContext; private static int maxAppAttempts = YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS; @@ -132,10 +143,29 @@ public class TestRMAppTransitions { public void handle(SchedulerEvent event) { } } + + @Parameterized.Parameters + public static Collection getTestParameters() { + return Arrays.asList(new Object[][] { + { Boolean.FALSE }, + { Boolean.TRUE } + }); + } + + public TestRMAppTransitions(boolean isSecurityEnabled) { + this.isSecurityEnabled = isSecurityEnabled; + } @Before public void setUp() throws Exception { - Configuration conf = new Configuration(); + conf = new YarnConfiguration(); + AuthenticationMethod authMethod = AuthenticationMethod.SIMPLE; + if (isSecurityEnabled) { + authMethod = AuthenticationMethod.KERBEROS; + } + SecurityUtil.setAuthenticationMethod(authMethod, conf); + UserGroupInformation.setConfiguration(conf); + rmDispatcher = new DrainDispatcher(); ContainerAllocationExpirer containerAllocationExpirer = mock(ContainerAllocationExpirer.class); @@ -171,7 +201,6 @@ public class TestRMAppTransitions { String user = MockApps.newUserName(); String name = MockApps.newAppName(); String queue = MockApps.newQueue(); - Configuration conf = new YarnConfiguration(); // ensure max application attempts set to known value conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, maxAppAttempts); YarnScheduler scheduler = mock(YarnScheduler.class); @@ -191,6 +220,8 @@ public class TestRMAppTransitions { System.currentTimeMillis(), "YARN"); testAppStartState(applicationId, user, name, queue, application); + this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(), + application); return application; } @@ -488,8 +519,6 @@ public class TestRMAppTransitions { // SUBMITTED => KILLED event RMAppEventType.KILL RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); - this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(), - application); application.handle(event); rmDispatcher.await(); assertKilled(application); @@ -535,8 +564,6 @@ public class TestRMAppTransitions { // ACCEPTED => KILLED 
event RMAppEventType.KILL RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.KILL); - this.rmContext.getRMApps().putIfAbsent(application.getApplicationId(), - application); application.handle(event); rmDispatcher.await(); assertKilled(application); @@ -731,4 +758,33 @@ public class TestRMAppTransitions { report = app.createAndGetApplicationReport("clientuser", true); Assert.assertNotNull(report.getApplicationResourceUsageReport()); } + + @Test + public void testClientTokens() throws Exception { + assumeTrue(isSecurityEnabled); + + RMApp app = createNewTestApp(null); + assertAppState(RMAppState.NEW, app); + ApplicationReport report = app.createAndGetApplicationReport(null, true); + Assert.assertNull(report.getClientToAMToken()); + report = app.createAndGetApplicationReport("clientuser", true); + Assert.assertNull(report.getClientToAMToken()); + + app = testCreateAppRunning(null); + rmDispatcher.await(); + assertAppState(RMAppState.RUNNING, app); + report = app.createAndGetApplicationReport(null, true); + Assert.assertNull(report.getClientToAMToken()); + report = app.createAndGetApplicationReport("clientuser", true); + Assert.assertNotNull(report.getClientToAMToken()); + + // kill the app attempt and verify client token is unavailable + app.handle(new RMAppEvent(app.getApplicationId(), RMAppEventType.KILL)); + rmDispatcher.await(); + assertAppAndAttemptKilled(app); + report = app.createAndGetApplicationReport(null, true); + Assert.assertNull(report.getClientToAMToken()); + report = app.createAndGetApplicationReport("clientuser", true); + Assert.assertNull(report.getClientToAMToken()); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index 2290284eaa8..39c633761d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -30,13 +30,17 @@ import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static org.mockito.Mockito.spy; +import java.util.Arrays; +import java.util.Collection; import java.util.Collections; import java.util.List; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -85,7 +89,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManag import org.junit.After; import org.junit.Before; import org.junit.Test; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +@RunWith(value = Parameterized.class) public class TestRMAppAttemptTransitions { private static 
final Log LOG = @@ -95,6 +102,7 @@ public class TestRMAppAttemptTransitions { private static final String RM_WEBAPP_ADDR = YarnConfiguration.getRMWebAppHostAndPort(new Configuration()); + private boolean isSecurityEnabled; private RMContext rmContext; private YarnScheduler scheduler; private ApplicationMasterService masterService; @@ -162,8 +170,26 @@ public class TestRMAppAttemptTransitions { private ApplicationSubmissionContext submissionContext = null; private boolean unmanagedAM; + @Parameterized.Parameters + public static Collection getTestParameters() { + return Arrays.asList(new Object[][] { + { Boolean.FALSE }, + { Boolean.TRUE } + }); + } + + public TestRMAppAttemptTransitions(Boolean isSecurityEnabled) { + this.isSecurityEnabled = isSecurityEnabled; + } + @Before public void setUp() throws Exception { + AuthenticationMethod authMethod = AuthenticationMethod.SIMPLE; + if (isSecurityEnabled) { + authMethod = AuthenticationMethod.KERBEROS; + } + SecurityUtil.setAuthenticationMethod(authMethod, conf); + UserGroupInformation.setConfiguration(conf); InlineDispatcher rmDispatcher = new InlineDispatcher(); ContainerAllocationExpirer containerAllocationExpirer = @@ -270,7 +296,9 @@ public class TestRMAppAttemptTransitions { if (UserGroupInformation.isSecurityEnabled()) { verify(clientToAMTokenManager).registerApplication( applicationAttempt.getAppAttemptId()); + assertNotNull(applicationAttempt.createClientToken("some client")); } + assertNull(applicationAttempt.createClientToken(null)); assertNotNull(applicationAttempt.getAMRMToken()); // Check events verify(masterService). @@ -883,6 +911,9 @@ public class TestRMAppAttemptTransitions { verify(amRMTokenManager, times(count)).applicationMasterFinished(appAttemptId); if (UserGroupInformation.isSecurityEnabled()) { verify(clientToAMTokenManager, times(count)).unRegisterApplication(appAttemptId); + if (count > 0) { + assertNull(applicationAttempt.createClientToken("client")); + } } } } From 0f91d8485a6cd96153ad35e9babab248b20f53dc Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Mon, 9 Sep 2013 21:48:09 +0000 Subject: [PATCH 140/153] YARN-910. Augmented auxiliary services to listen for container starts and completions in addition to application events. Contributed by Alejandro Abdelnur. 
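To illustrate the hooks this patch introduces, the sketch below shows what a NodeManager auxiliary service reacting to container starts and stops could look like. It is not part of the change itself: the class name, the logger, and the empty application-level callbacks are assumptions made for the example; only the overridden method signatures and the ContainerContext accessors (getUser, getContainerId, getResource) are taken from the patch.

import java.nio.ByteBuffer;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext;
import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
import org.apache.hadoop.yarn.server.api.AuxiliaryService;
import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
import org.apache.hadoop.yarn.server.api.ContainerTerminationContext;

/** Hypothetical service that logs the lifecycle of every container on the node. */
public class ContainerTrackingAuxService extends AuxiliaryService {

  private static final Log LOG =
      LogFactory.getLog(ContainerTrackingAuxService.class);

  public ContainerTrackingAuxService() {
    super("ContainerTrackingAuxService");
  }

  @Override
  public void initializeApplication(
      ApplicationInitializationContext initAppContext) {
    // no per-application state is needed for this sketch
  }

  @Override
  public void stopApplication(ApplicationTerminationContext stopAppContext) {
    // nothing to clean up per application
  }

  @Override
  public ByteBuffer getMetaData() {
    // no metadata to hand back to the ApplicationMaster
    return ByteBuffer.allocate(0);
  }

  @Override
  public void initializeContainer(
      ContainerInitializationContext initContainerContext) {
    // called after the NM accepts the launch request, before the process starts
    LOG.info("Container " + initContainerContext.getContainerId()
        + " for user " + initContainerContext.getUser()
        + " starting with " + initContainerContext.getResource());
  }

  @Override
  public void stopContainer(ContainerTerminationContext stopContainerContext) {
    // called when the container finishes on this NodeManager
    LOG.info("Container " + stopContainerContext.getContainerId() + " finished");
  }
}

Because both new methods get empty default implementations in AuxiliaryService, existing services that only handle application-level events keep working without modification; a service opts in by overriding one or both hooks.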
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521298 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 + .../yarn/server/api/AuxiliaryService.java | 25 ++++++- .../yarn/server/api/ContainerContext.java | 75 +++++++++++++++++++ .../api/ContainerInitializationContext.java | 44 +++++++++++ .../api/ContainerTerminationContext.java | 44 +++++++++++ .../containermanager/AuxServices.java | 18 ++++- .../containermanager/AuxServicesEvent.java | 19 +++++ .../AuxServicesEventType.java | 4 +- .../container/ContainerImpl.java | 11 +++ .../containermanager/TestAuxServices.java | 61 ++++++++++++++- 10 files changed, 300 insertions(+), 5 deletions(-) create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java create mode 100644 hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 03747b3098f..ff351494297 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -28,6 +28,10 @@ Release 2.3.0 - UNRELEASED YARN-905. Add state filters to nodes CLI (Wei Yan via Sandy Ryza) + YARN-910. Augmented auxiliary services to listen for container starts and + completions in addition to application events. (Alejandro Abdelnur via + vinodkv) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/AuxiliaryService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/AuxiliaryService.java index 275f2a91038..58b06e274a8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/AuxiliaryService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/AuxiliaryService.java @@ -79,4 +79,27 @@ public abstract class AuxiliaryService extends AbstractService { * applications. */ public abstract ByteBuffer getMetaData(); -} \ No newline at end of file + + /** + * A new container is started on this NodeManager. This is a signal to + * this {@link AuxiliaryService} about the container initialization. + * This method is called when the NodeManager receives the container launch + * command from the ApplicationMaster and before the container process is + * launched. + * + * @param initContainerContext context for the container's initialization + */ + public void initializeContainer(ContainerInitializationContext + initContainerContext) { + } + + /** + * A container is finishing on this NodeManager. This is a signal to this + * {@link AuxiliaryService} about the same. 
+ * + * @param stopContainerContext context for the container termination + */ + public void stopContainer(ContainerTerminationContext stopContainerContext) { + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java new file mode 100644 index 00000000000..d13159b308f --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerContext.java @@ -0,0 +1,75 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.api; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.Resource; + +/** + * Base context class for {@link AuxiliaryService} initializing and stopping a + * container. + */ +@Public +@Evolving +public class ContainerContext { + private final String user; + private final ContainerId containerId; + private final Resource resource; + + @Private + @Unstable + public ContainerContext(String user, ContainerId containerId, + Resource resource) { + this.user = user; + this.containerId = containerId; + this.resource = resource; + } + + /** + * Get user of the container being initialized or stopped. + * + * @return the user + */ + public String getUser() { + return user; + } + + /** + * Get {@link ContainerId} of the container being initialized or stopped. + * + * @return the container ID + */ + public ContainerId getContainerId() { + return containerId; + } + + /** + * Get {@link Resource} the resource capability allocated to the container + * being initialized or stopped. + * + * @return the resource capability. + */ + public Resource getResource() { + return resource; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java new file mode 100644 index 00000000000..5b5bbda0c07 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerInitializationContext.java @@ -0,0 +1,44 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. 
See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.api; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.Resource; + +/** + * Initialization context for {@link AuxiliaryService} when starting a + * container. + * + */ +@Public +@Evolving +public class ContainerInitializationContext extends ContainerContext { + + @Private + @Unstable + public ContainerInitializationContext(String user, ContainerId containerId, + Resource resource) { + super(user, containerId, resource); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java new file mode 100644 index 00000000000..34ba73e2213 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/server/api/ContainerTerminationContext.java @@ -0,0 +1,44 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + +package org.apache.hadoop.yarn.server.api; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.Resource; + +/** + * Termination context for {@link AuxiliaryService} when stopping a + * container. 
+ * + */ +@Public +@Evolving +public class ContainerTerminationContext extends ContainerContext { + + @Private + @Unstable + public ContainerTerminationContext(String user, ContainerId containerId, + Resource resource) { + super(user, containerId, resource); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java index 955ccbf19ea..13f43650a47 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServices.java @@ -37,6 +37,8 @@ import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext; import org.apache.hadoop.yarn.server.api.AuxiliaryService; import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext; +import org.apache.hadoop.yarn.server.api.ContainerInitializationContext; +import org.apache.hadoop.yarn.server.api.ContainerTerminationContext; public class AuxServices extends AbstractService implements ServiceStateChangeListener, EventHandler { @@ -178,7 +180,21 @@ public class AuxServices extends AbstractService .getApplicationID())); } break; - default: + case CONTAINER_INIT: + for (AuxiliaryService serv : serviceMap.values()) { + serv.initializeContainer(new ContainerInitializationContext( + event.getUser(), event.getContainer().getContainerId(), + event.getContainer().getResource())); + } + break; + case CONTAINER_STOP: + for (AuxiliaryService serv : serviceMap.values()) { + serv.stopContainer(new ContainerTerminationContext( + event.getUser(), event.getContainer().getContainerId(), + event.getContainer().getResource())); + } + break; + default: throw new RuntimeException("Unknown type: " + event.getType()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEvent.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEvent.java index 4b9c93157b2..1e5a9a737f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEvent.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEvent.java @@ -21,7 +21,10 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; import java.nio.ByteBuffer; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.event.AbstractEvent; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .Container; public class AuxServicesEvent extends AbstractEvent { @@ -29,18 +32,30 @@ public class AuxServicesEvent extends AbstractEvent { private final String serviceId; private final 
ByteBuffer serviceData; private final ApplicationId appId; + private final Container container; public AuxServicesEvent(AuxServicesEventType eventType, ApplicationId appId) { this(eventType, null, appId, null, null); } + public AuxServicesEvent(AuxServicesEventType eventType, Container container) { + this(eventType, null, container.getContainerId().getApplicationAttemptId() + .getApplicationId(), null, null, container); + } + public AuxServicesEvent(AuxServicesEventType eventType, String user, ApplicationId appId, String serviceId, ByteBuffer serviceData) { + this(eventType, user, appId, serviceId, serviceData, null); + } + public AuxServicesEvent(AuxServicesEventType eventType, String user, + ApplicationId appId, String serviceId, ByteBuffer serviceData, + Container container) { super(eventType); this.user = user; this.appId = appId; this.serviceId = serviceId; this.serviceData = serviceData; + this.container = container; } public String getServiceID() { @@ -59,4 +74,8 @@ public class AuxServicesEvent extends AbstractEvent { return appId; } + public Container getContainer() { + return container; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEventType.java index b8276b0ba14..45f3c8f2906 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEventType.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/AuxServicesEventType.java @@ -20,5 +20,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager; public enum AuxServicesEventType { APPLICATION_INIT, - APPLICATION_STOP + APPLICATION_STOP, + CONTAINER_INIT, + CONTAINER_STOP } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 54a2cbec56d..c2d32b57bee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -503,6 +503,9 @@ public class ContainerImpl implements Container { final ContainerLaunchContext ctxt = container.launchContext; container.metrics.initingContainer(); + container.dispatcher.getEventHandler().handle(new AuxServicesEvent + (AuxServicesEventType.CONTAINER_INIT, container)); + // Inform the AuxServices about the opaque serviceData Map csd = ctxt.getServiceData(); if (csd != null) { @@ -820,8 +823,16 @@ public class ContainerImpl implements Container { static class ContainerDoneTransition implements SingleArcTransition { @Override + @SuppressWarnings("unchecked") public void transition(ContainerImpl 
container, ContainerEvent event) { container.finished(); + //if the current state is NEW it means the CONTAINER_INIT was never + // sent for the event, thus no need to send the CONTAINER_STOP + if (container.getCurrentState() + != org.apache.hadoop.yarn.api.records.ContainerState.NEW) { + container.dispatcher.getEventHandler().handle(new AuxServicesEvent + (AuxServicesEventType.CONTAINER_STOP, container)); + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java index fb4b69a21f0..81f758ee063 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestAuxServices.java @@ -22,6 +22,7 @@ import static org.apache.hadoop.service.Service.STATE.INITED; import static org.apache.hadoop.service.Service.STATE.STARTED; import static org.apache.hadoop.service.Service.STATE.STOPPED; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -34,11 +35,21 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.service.Service; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.ApplicationInitializationContext; import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext; import org.apache.hadoop.yarn.server.api.AuxiliaryService; +import org.apache.hadoop.yarn.server.api.ContainerInitializationContext; +import org.apache.hadoop.yarn.server.api.ContainerTerminationContext; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .Container; +import org.apache.hadoop.yarn.server.nodemanager.containermanager.container + .ContainerImpl; import org.junit.Test; public class TestAuxServices { @@ -52,8 +63,10 @@ public class TestAuxServices { private int remaining_stop; private ByteBuffer meta = null; private ArrayList stoppedApps; + private ContainerId containerId; + private Resource resource; - LightService(String name, char idef, int expected_appId) { + LightService(String name, char idef, int expected_appId) { this(name, idef, expected_appId, null); } LightService(String name, char idef, int expected_appId, ByteBuffer meta) { @@ -95,7 +108,22 @@ public class TestAuxServices { public ByteBuffer getMetaData() { return meta; } - } + + @Override + public void initializeContainer( + ContainerInitializationContext initContainerContext) { + containerId = initContainerContext.getContainerId(); + resource = initContainerContext.getResource(); + } + + @Override + public void stopContainer( + ContainerTerminationContext stopContainerContext) 
{ + containerId = stopContainerContext.getContainerId(); + resource = stopContainerContext.getResource(); + } + + } static class ServiceA extends LightService { public ServiceA() { @@ -142,6 +170,35 @@ public class TestAuxServices { assertEquals("app not properly stopped", 1, appIds.size()); assertTrue("wrong app stopped", appIds.contains((Integer)66)); } + + for (AuxiliaryService serv : servs) { + assertNull(((LightService) serv).containerId); + assertNull(((LightService) serv).resource); + } + + + ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId1, 1); + ContainerTokenIdentifier cti = new ContainerTokenIdentifier( + ContainerId.newInstance(attemptId, 1), "", "", + Resource.newInstance(1, 1), 0,0,0); + Container container = new ContainerImpl(null, null, null, null, null, cti); + ContainerId containerId = container.getContainerId(); + Resource resource = container.getResource(); + event = new AuxServicesEvent(AuxServicesEventType.CONTAINER_INIT,container); + aux.handle(event); + for (AuxiliaryService serv : servs) { + assertEquals(containerId, ((LightService) serv).containerId); + assertEquals(resource, ((LightService) serv).resource); + ((LightService) serv).containerId = null; + ((LightService) serv).resource = null; + } + + event = new AuxServicesEvent(AuxServicesEventType.CONTAINER_STOP, container); + aux.handle(event); + for (AuxiliaryService serv : servs) { + assertEquals(containerId, ((LightService) serv).containerId); + assertEquals(resource, ((LightService) serv).resource); + } } @Test From 59b5490989fad4c8e80dd85c3419810cdc8332f7 Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Tue, 10 Sep 2013 01:24:45 +0000 Subject: [PATCH 141/153] YARN-292. Fixed FifoScheduler and FairScheduler to make their applications data structures thread safe to avoid RM crashing with ArrayIndexOutOfBoundsException. Contributed by Zhijie Shen. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521328 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 4 + .../rmapp/attempt/RMAppAttemptImpl.java | 6 +- .../scheduler/capacity/CapacityScheduler.java | 5 +- .../scheduler/fair/FairScheduler.java | 5 +- .../scheduler/fifo/FifoScheduler.java | 14 ++-- .../capacity/TestCapacityScheduler.java | 75 ++++++++++++++++++- .../scheduler/fair/TestFairScheduler.java | 9 +++ .../scheduler/fifo/TestFifoScheduler.java | 13 +++- 8 files changed, 117 insertions(+), 14 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index ff351494297..b211a18bbb7 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -176,6 +176,10 @@ Release 2.1.1-beta - UNRELEASED invalid client token key errors when an appliation is about to finish. (Jason Lowe via vinodkv) + YARN-292. Fixed FifoScheduler and FairScheduler to make their applications + data structures thread safe to avoid RM crashing with + ArrayIndexOutOfBoundsException. 
(Zhijie Shen via vinodkv) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 0e1b2c8a53f..94a0f94b572 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -833,7 +833,11 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { Allocation amContainerAllocation = appAttempt.scheduler.allocate( appAttempt.applicationAttemptId, EMPTY_CONTAINER_REQUEST_LIST, EMPTY_CONTAINER_RELEASE_LIST, null, null); - + // There must be at least one container allocated, because a + // CONTAINER_ALLOCATED is emitted after an RMContainer is constructed, + // and is put in SchedulerApplication#newlyAllocatedContainers. Then, + // YarnScheduler#allocate will fetch it. + assert amContainerAllocation.getContainers().size() != 0; // Set the masterContainer appAttempt.setMasterContainer(amContainerAllocation.getContainers().get( 0)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index bbf7f5cf699..9edf420da00 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -80,6 +80,8 @@ import org.apache.hadoop.yarn.server.utils.Lock; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; +import com.google.common.annotations.VisibleForTesting; + @LimitedPrivate("yarn") @Evolving @SuppressWarnings("unchecked") @@ -179,7 +181,8 @@ public class CapacityScheduler private Resource minimumAllocation; private Resource maximumAllocation; - private Map applications = + @VisibleForTesting + protected Map applications = new ConcurrentHashMap(); private boolean initialized = false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 72000e91fc3..73221042a76 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -37,7 +37,6 @@ import org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate; import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -50,6 +49,7 @@ import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; @@ -154,8 +154,9 @@ public class FairScheduler implements ResourceScheduler { // This stores per-application scheduling information, indexed by // attempt ID's for fast lookup. + @VisibleForTesting protected Map applications = - new HashMap(); + new ConcurrentHashMap(); // Nodes in the cluster, indexed by NodeId private Map nodes = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 115d2089c34..bac013f365a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -25,8 +25,8 @@ import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; -import java.util.TreeMap; import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentSkipListMap; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -36,12 +36,10 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; -import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; -import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueACL; @@ -50,7 +48,9 @@ import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.QueueUserACLInfo; import 
org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger; @@ -90,6 +90,8 @@ import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator; import org.apache.hadoop.yarn.util.resource.ResourceCalculator; import org.apache.hadoop.yarn.util.resource.Resources; +import com.google.common.annotations.VisibleForTesting; + @LimitedPrivate("yarn") @Evolving @SuppressWarnings("unchecked") @@ -113,8 +115,10 @@ public class FifoScheduler implements ResourceScheduler, Configurable { private Resource maximumAllocation; private boolean usePortForNodeName; - private Map applications - = new TreeMap(); + // Use ConcurrentSkipListMap because applications need to be ordered + @VisibleForTesting + protected Map applications + = new ConcurrentSkipListMap(); private ActiveUsersManager activeUsersManager; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index ec486d78ea0..38df24fa991 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -19,22 +19,27 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; +import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; import java.io.IOException; +import java.lang.reflect.Constructor; import java.util.Comparator; import java.util.List; +import java.util.Map; +import java.util.concurrent.atomic.AtomicBoolean; import junit.framework.Assert; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.net.NetworkTopology; -import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueInfo; @@ -44,19 +49,24 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; import org.apache.hadoop.yarn.server.resourcemanager.Application; import 
org.apache.hadoop.yarn.server.resourcemanager.MockNodes; +import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.Task; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ActiveUsersManager; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerApplication; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM; -import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; +import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.After; import org.junit.Before; @@ -525,4 +535,63 @@ public class TestCapacityScheduler { assertTrue(appComparator.compare(app2, app3) < 0); } + @Test + public void testConcurrentAccessOnApplications() throws Exception { + CapacityScheduler cs = new CapacityScheduler(); + verifyConcurrentAccessOnApplications( + cs.applications, FiCaSchedulerApp.class); + } + + public static + void verifyConcurrentAccessOnApplications( + final Map applications, Class clazz) + throws Exception { + final int size = 10000; + final ApplicationId appId = ApplicationId.newInstance(0, 0); + final Constructor ctor = clazz.getDeclaredConstructor( + ApplicationAttemptId.class, String.class, Queue.class, + ActiveUsersManager.class, RMContext.class); + + ApplicationAttemptId appAttemptId0 + = ApplicationAttemptId.newInstance(appId, 0); + applications.put(appAttemptId0, ctor.newInstance( + appAttemptId0, null, mock(Queue.class), null, null)); + assertNotNull(applications.get(appAttemptId0)); + + // Imitating the thread of scheduler that will add and remove apps + final AtomicBoolean finished = new AtomicBoolean(false); + final AtomicBoolean failed = new AtomicBoolean(false); + Thread t = new Thread() { + + @Override + public void run() { + for (int i = 1; i <= size; ++i) { + ApplicationAttemptId appAttemptId + = ApplicationAttemptId.newInstance(appId, i); + try { + applications.put(appAttemptId, ctor.newInstance( + appAttemptId, null, mock(Queue.class), null, null)); + } catch (Exception e) { + failed.set(true); + finished.set(true); + return; + } + } + for (int i = 1; i <= size; ++i) { + ApplicationAttemptId appAttemptId + = ApplicationAttemptId.newInstance(appId, i); + applications.remove(appAttemptId); + } + finished.set(true); + } + }; + t.start(); + + // Imitating the thread of rmappattempt that will get the app + while (!finished.get()) { + assertNotNull(applications.get(appAttemptId0)); + } + assertFalse(failed.get()); + } + } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 17ff7ded850..0b348eaf044 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -72,6 +72,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppRemovedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; @@ -79,6 +80,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemoved import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.DominantResourceFairnessPolicy; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.policies.FifoPolicy; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Clock; import org.apache.hadoop.yarn.util.resource.Resources; @@ -2196,4 +2198,11 @@ public class TestFairScheduler { assertEquals(1, app.getLiveContainers().size()); } + @Test + public void testConcurrentAccessOnApplications() throws Exception { + FairScheduler fs = new FairScheduler(); + TestCapacityScheduler.verifyConcurrentAccessOnApplications( + fs.applications, FSSchedulerApp.class); + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index b71726a355c..30ce68e73ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -51,13 +51,15 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerAppReport; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestCapacityScheduler; +import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; +import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; -import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.After; import org.junit.Before; @@ -414,7 +416,14 @@ public class TestFifoScheduler { LOG.info("--- END: testFifoScheduler ---"); } - + + @Test + public void testConcurrentAccessOnApplications() throws Exception { + FifoScheduler fs = new FifoScheduler(); + TestCapacityScheduler.verifyConcurrentAccessOnApplications( + fs.applications, FiCaSchedulerApp.class); + } + private void checkApplicationResourceUsage(int expected, Application application) { Assert.assertEquals(expected, application.getUsedResources().getMemory()); From 11944e52fdb93e72272e2a9353730fa1603f8352 Mon Sep 17 00:00:00 2001 From: Alejandro Abdelnur Date: Tue, 10 Sep 2013 12:14:49 +0000 Subject: [PATCH 142/153] Amending yarn CHANGES.txt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521454 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index b211a18bbb7..d86d998c4f2 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -28,10 +28,6 @@ Release 2.3.0 - UNRELEASED YARN-905. Add state filters to nodes CLI (Wei Yan via Sandy Ryza) - YARN-910. Augmented auxiliary services to listen for container starts and - completions in addition to application events. (Alejandro Abdelnur via - vinodkv) - OPTIMIZATIONS BUG FIXES @@ -96,6 +92,10 @@ Release 2.1.1-beta - UNRELEASED YARN-696. Changed RMWebservice apps call to take in multiple application states. (Trevor Lorimer via vinodkv) + YARN-910. Augmented auxiliary services to listen for container starts and + completions in addition to application events. (Alejandro Abdelnur via + vinodkv) + OPTIMIZATIONS BUG FIXES From bcb865314f8ce89fc03a3e3de487fb6d75ddddd8 Mon Sep 17 00:00:00 2001 From: Bikas Saha Date: Tue, 10 Sep 2013 17:43:51 +0000 Subject: [PATCH 143/153] YARN-1098. 
Separate out RM services into Always On and Active (Karthik Kambatla via bikas) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521560 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 2 + .../resourcemanager/ResourceManager.java | 434 +++++++++--------- 2 files changed, 227 insertions(+), 209 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index d86d998c4f2..76365a8b885 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -27,6 +27,8 @@ Release 2.3.0 - UNRELEASED IMPROVEMENTS YARN-905. Add state filters to nodes CLI (Wei Yan via Sandy Ryza) + YARN-1098. Separate out RM services into Always On and Active (Karthik + Kambatla via bikas) OPTIMIZATIONS diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java index ee418c1937c..8c0b195f707 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java @@ -107,9 +107,18 @@ public class ResourceManager extends CompositeService implements Recoverable { private static final Log LOG = LogFactory.getLog(ResourceManager.class); public static final long clusterTimeStamp = System.currentTimeMillis(); + /** + * "Active" services. Services that need to run only on the Active RM. + * These services are managed (initialized, started, stopped) by the + * {@link CompositeService} RMActiveServices. + * + * RM is active when (1) HA is disabled, or (2) HA is enabled and the RM is + * in Active state. 
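To make the split concrete, here is a minimal sketch of the grouping this patch introduces, with illustrative names and the 2.1 service package layout assumed; it is not the patch's own code, which follows in RMActiveServices below:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.service.CompositeService;

    // Always-On services stay direct children of the outer composite;
    // Active-only services are grouped under their own composite child.
    public class ActiveServicesSketch extends CompositeService {
      private final CompositeService active = new CompositeService("Active");

      public ActiveServicesSketch() {
        super("ActiveServicesSketch");
      }

      @Override
      protected void serviceInit(Configuration conf) throws Exception {
        // Always-On pieces would be added here with addService(...).
        addService(active);   // Active pieces init/start/stop as one unit
        super.serviceInit(conf);
      }

      // Hypothetical, not part of this patch: a later HA change could stop
      // only the Active group on a transition to standby.
      void becomeStandby() {
        active.stop();
      }
    }
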
+ */ + protected RMActiveServices activeServices; protected ClientToAMTokenSecretManagerInRM clientToAMSecretManager = new ClientToAMTokenSecretManagerInRM(); - + protected RMContainerTokenSecretManager containerTokenSecretManager; protected NMTokenSecretManagerInRM nmTokenSecretManager; @@ -135,6 +144,8 @@ public class ResourceManager extends CompositeService implements Recoverable { protected ResourceTrackerService resourceTracker; private boolean recoveryEnabled; + /** End of Active services */ + private Configuration conf; public ResourceManager() { @@ -147,137 +158,11 @@ public class ResourceManager extends CompositeService implements Recoverable { @Override protected void serviceInit(Configuration conf) throws Exception { - validateConfigs(conf); - this.conf = conf; - this.conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); - - this.rmDispatcher = createDispatcher(); - addIfService(this.rmDispatcher); - - this.amRmTokenSecretManager = createAMRMTokenSecretManager(conf); - - this.containerAllocationExpirer = new ContainerAllocationExpirer( - this.rmDispatcher); - addService(this.containerAllocationExpirer); - - AMLivelinessMonitor amLivelinessMonitor = createAMLivelinessMonitor(); - addService(amLivelinessMonitor); - - AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor(); - addService(amFinishingMonitor); - - this.containerTokenSecretManager = createContainerTokenSecretManager(conf); - this.nmTokenSecretManager = createNMTokenSecretManager(conf); - - boolean isRecoveryEnabled = conf.getBoolean( - YarnConfiguration.RECOVERY_ENABLED, - YarnConfiguration.DEFAULT_RM_RECOVERY_ENABLED); - - RMStateStore rmStore = null; - if(isRecoveryEnabled) { - recoveryEnabled = true; - rmStore = RMStateStoreFactory.getStore(conf); - } else { - recoveryEnabled = false; - rmStore = new NullRMStateStore(); - } - - try { - rmStore.init(conf); - rmStore.setRMDispatcher(rmDispatcher); - } catch (Exception e) { - // the Exception from stateStore.init() needs to be handled for - // HA and we need to give up master status if we got fenced - LOG.error("Failed to init state store", e); - ExitUtil.terminate(1, e); - } - - if (UserGroupInformation.isSecurityEnabled()) { - this.delegationTokenRenewer = createDelegationTokenRenewer(); - } - - this.rmContext = - new RMContextImpl(this.rmDispatcher, rmStore, - this.containerAllocationExpirer, amLivelinessMonitor, - amFinishingMonitor, delegationTokenRenewer, this.amRmTokenSecretManager, - this.containerTokenSecretManager, this.nmTokenSecretManager, - this.clientToAMSecretManager); - - // Register event handler for NodesListManager - this.nodesListManager = new NodesListManager(this.rmContext); - this.rmDispatcher.register(NodesListManagerEventType.class, - this.nodesListManager); - addService(nodesListManager); - - // Initialize the scheduler - this.scheduler = createScheduler(); - this.schedulerDispatcher = createSchedulerEventDispatcher(); - addIfService(this.schedulerDispatcher); - this.rmDispatcher.register(SchedulerEventType.class, - this.schedulerDispatcher); - - // Register event handler for RmAppEvents - this.rmDispatcher.register(RMAppEventType.class, - new ApplicationEventDispatcher(this.rmContext)); - - // Register event handler for RmAppAttemptEvents - this.rmDispatcher.register(RMAppAttemptEventType.class, - new ApplicationAttemptEventDispatcher(this.rmContext)); - - // Register event handler for RmNodes - this.rmDispatcher.register(RMNodeEventType.class, - new NodeEventDispatcher(this.rmContext)); - - this.nmLivelinessMonitor = 
createNMLivelinessMonitor(); - addService(this.nmLivelinessMonitor); - - this.resourceTracker = createResourceTrackerService(); - addService(resourceTracker); - - DefaultMetricsSystem.initialize("ResourceManager"); - JvmMetrics.initSingleton("ResourceManager", null); - - try { - this.scheduler.reinitialize(conf, this.rmContext); - } catch (IOException ioe) { - throw new RuntimeException("Failed to initialize scheduler", ioe); - } - - // creating monitors that handle preemption - createPolicyMonitors(); - - masterService = createApplicationMasterService(); - addService(masterService) ; - - this.applicationACLsManager = new ApplicationACLsManager(conf); - - this.rmAppManager = createRMAppManager(); - // Register event handler for RMAppManagerEvents - this.rmDispatcher.register(RMAppManagerEventType.class, - this.rmAppManager); - this.rmDTSecretManager = createRMDelegationTokenSecretManager(this.rmContext); - rmContext.setRMDelegationTokenSecretManager(this.rmDTSecretManager); - clientRM = createClientRMService(); - rmContext.setClientRMService(clientRM); - addService(clientRM); - - adminService = createAdminService(clientRM, masterService, resourceTracker); - addService(adminService); - - this.applicationMasterLauncher = createAMLauncher(); - this.rmDispatcher.register(AMLauncherEventType.class, - this.applicationMasterLauncher); - - addService(applicationMasterLauncher); - if (UserGroupInformation.isSecurityEnabled()) { - addService(delegationTokenRenewer); - delegationTokenRenewer.setRMContext(rmContext); - } - new RMNMInfo(this.rmContext, this.scheduler); - + activeServices = new RMActiveServices(); + addService(activeServices); super.serviceInit(conf); } @@ -378,6 +263,217 @@ public class ResourceManager extends CompositeService implements Recoverable { } } + /** + * RMActiveServices handles all the Active services in the RM. 
+ */ + @Private + class RMActiveServices extends CompositeService { + RMActiveServices() { + super("RMActiveServices"); + } + + @Override + protected void serviceInit(Configuration configuration) throws Exception { + conf.setBoolean(Dispatcher.DISPATCHER_EXIT_ON_ERROR_KEY, true); + + rmDispatcher = createDispatcher(); + addIfService(rmDispatcher); + + amRmTokenSecretManager = createAMRMTokenSecretManager(conf); + + containerAllocationExpirer = new ContainerAllocationExpirer(rmDispatcher); + addService(containerAllocationExpirer); + + AMLivelinessMonitor amLivelinessMonitor = createAMLivelinessMonitor(); + addService(amLivelinessMonitor); + + AMLivelinessMonitor amFinishingMonitor = createAMLivelinessMonitor(); + addService(amFinishingMonitor); + + containerTokenSecretManager = createContainerTokenSecretManager(conf); + nmTokenSecretManager = createNMTokenSecretManager(conf); + + boolean isRecoveryEnabled = conf.getBoolean( + YarnConfiguration.RECOVERY_ENABLED, + YarnConfiguration.DEFAULT_RM_RECOVERY_ENABLED); + + RMStateStore rmStore = null; + if(isRecoveryEnabled) { + recoveryEnabled = true; + rmStore = RMStateStoreFactory.getStore(conf); + } else { + recoveryEnabled = false; + rmStore = new NullRMStateStore(); + } + + try { + rmStore.init(conf); + rmStore.setRMDispatcher(rmDispatcher); + } catch (Exception e) { + // the Exception from stateStore.init() needs to be handled for + // HA and we need to give up master status if we got fenced + LOG.error("Failed to init state store", e); + ExitUtil.terminate(1, e); + } + + if (UserGroupInformation.isSecurityEnabled()) { + delegationTokenRenewer = createDelegationTokenRenewer(); + } + + rmContext = new RMContextImpl( + rmDispatcher, rmStore, containerAllocationExpirer, amLivelinessMonitor, + amFinishingMonitor, delegationTokenRenewer, amRmTokenSecretManager, + containerTokenSecretManager, nmTokenSecretManager, + clientToAMSecretManager); + + // Register event handler for NodesListManager + nodesListManager = new NodesListManager(rmContext); + rmDispatcher.register(NodesListManagerEventType.class, nodesListManager); + addService(nodesListManager); + + // Initialize the scheduler + scheduler = createScheduler(); + schedulerDispatcher = createSchedulerEventDispatcher(); + addIfService(schedulerDispatcher); + rmDispatcher.register(SchedulerEventType.class, schedulerDispatcher); + + // Register event handler for RmAppEvents + rmDispatcher.register(RMAppEventType.class, + new ApplicationEventDispatcher(rmContext)); + + // Register event handler for RmAppAttemptEvents + rmDispatcher.register(RMAppAttemptEventType.class, + new ApplicationAttemptEventDispatcher(rmContext)); + + // Register event handler for RmNodes + rmDispatcher.register( + RMNodeEventType.class, new NodeEventDispatcher(rmContext)); + + nmLivelinessMonitor = createNMLivelinessMonitor(); + addService(nmLivelinessMonitor); + + resourceTracker = createResourceTrackerService(); + addService(resourceTracker); + + DefaultMetricsSystem.initialize("ResourceManager"); + JvmMetrics.initSingleton("ResourceManager", null); + + try { + scheduler.reinitialize(conf, rmContext); + } catch (IOException ioe) { + throw new RuntimeException("Failed to initialize scheduler", ioe); + } + + // creating monitors that handle preemption + createPolicyMonitors(); + + masterService = createApplicationMasterService(); + addService(masterService) ; + + applicationACLsManager = new ApplicationACLsManager(conf); + + rmAppManager = createRMAppManager(); + // Register event handler for RMAppManagerEvents + 
rmDispatcher.register(RMAppManagerEventType.class, rmAppManager); + rmDTSecretManager = createRMDelegationTokenSecretManager(rmContext); + rmContext.setRMDelegationTokenSecretManager(rmDTSecretManager); + clientRM = createClientRMService(); + rmContext.setClientRMService(clientRM); + addService(clientRM); + + adminService = createAdminService(clientRM, masterService, resourceTracker); + addService(adminService); + + applicationMasterLauncher = createAMLauncher(); + rmDispatcher.register(AMLauncherEventType.class, + applicationMasterLauncher); + + addService(applicationMasterLauncher); + if (UserGroupInformation.isSecurityEnabled()) { + addService(delegationTokenRenewer); + delegationTokenRenewer.setRMContext(rmContext); + } + + new RMNMInfo(rmContext, scheduler); + + super.serviceInit(conf); + } + + @Override + protected void serviceStart() throws Exception { + amRmTokenSecretManager.start(); + containerTokenSecretManager.start(); + nmTokenSecretManager.start(); + + RMStateStore rmStore = rmContext.getStateStore(); + // The state store needs to start irrespective of recoveryEnabled as apps + // need events to move to further states. + rmStore.start(); + + if(recoveryEnabled) { + try { + RMState state = rmStore.loadState(); + recover(state); + } catch (Exception e) { + // the Exception from loadState() needs to be handled for + // HA and we need to give up master status if we got fenced + LOG.error("Failed to load/recover state", e); + ExitUtil.terminate(1, e); + } + } + + startWepApp(); + try { + rmDTSecretManager.startThreads(); + } catch(IOException ie) { + throw new YarnRuntimeException("Failed to start secret manager threads", ie); + } + + if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { + String hostname = getConfig().get(YarnConfiguration.RM_WEBAPP_ADDRESS, + YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS); + hostname = (hostname.contains(":")) ? hostname.substring(0, hostname.indexOf(":")) : hostname; + int port = webApp.port(); + String resolvedAddress = hostname + ":" + port; + conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resolvedAddress); + } + + super.serviceStart(); + } + + @Override + protected void serviceStop() throws Exception { + if (webApp != null) { + webApp.stop(); + } + if (rmDTSecretManager != null) { + rmDTSecretManager.stopThreads(); + } + + if (amRmTokenSecretManager != null) { + amRmTokenSecretManager.stop(); + } + if (containerTokenSecretManager != null) { + containerTokenSecretManager.stop(); + } + if(nmTokenSecretManager != null) { + nmTokenSecretManager.stop(); + } + + DefaultMetricsSystem.shutdown(); + + if (rmContext != null) { + RMStateStore store = rmContext.getStateStore(); + try { + store.close(); + } catch (Exception e) { + LOG.error("Error closing store.", e); + } + } + super.serviceStop(); + } + } + @Private public static class SchedulerEventDispatcher extends AbstractService implements EventHandler { @@ -620,54 +716,7 @@ public class ResourceManager extends CompositeService implements Recoverable { throw new YarnRuntimeException("Failed to login", ie); } - this.amRmTokenSecretManager.start(); - this.containerTokenSecretManager.start(); - this.nmTokenSecretManager.start(); - - RMStateStore rmStore = rmContext.getStateStore(); - // The state store needs to start irrespective of recoveryEnabled as apps - // need events to move to further states. 
- rmStore.start(); - - if(recoveryEnabled) { - try { - RMState state = rmStore.loadState(); - recover(state); - } catch (Exception e) { - // the Exception from loadState() needs to be handled for - // HA and we need to give up master status if we got fenced - LOG.error("Failed to load/recover state", e); - ExitUtil.terminate(1, e); - } - } - - startWepApp(); - try { - rmDTSecretManager.startThreads(); - } catch(IOException ie) { - throw new YarnRuntimeException("Failed to start secret manager threads", ie); - } - - if (getConfig().getBoolean(YarnConfiguration.IS_MINI_YARN_CLUSTER, false)) { - String hostname = getConfig().get(YarnConfiguration.RM_WEBAPP_ADDRESS, - YarnConfiguration.DEFAULT_RM_WEBAPP_ADDRESS); - hostname = (hostname.contains(":")) ? hostname.substring(0, hostname.indexOf(":")) : hostname; - int port = webApp.port(); - String resolvedAddress = hostname + ":" + port; - conf.set(YarnConfiguration.RM_WEBAPP_ADDRESS, resolvedAddress); - } - super.serviceStart(); - - /*synchronized(shutdown) { - try { - while(!shutdown.get()) { - shutdown.wait(); - } - } catch(InterruptedException ie) { - LOG.info("Interrupted while waiting", ie); - } - }*/ } protected void doSecureLogin() throws IOException { @@ -677,39 +726,6 @@ public class ResourceManager extends CompositeService implements Recoverable { @Override protected void serviceStop() throws Exception { - if (webApp != null) { - webApp.stop(); - } - if (rmDTSecretManager != null) { - rmDTSecretManager.stopThreads(); - } - - if (amRmTokenSecretManager != null) { - this.amRmTokenSecretManager.stop(); - } - if (containerTokenSecretManager != null) { - this.containerTokenSecretManager.stop(); - } - if(nmTokenSecretManager != null) { - nmTokenSecretManager.stop(); - } - - /*synchronized(shutdown) { - shutdown.set(true); - shutdown.notifyAll(); - }*/ - - DefaultMetricsSystem.shutdown(); - - if (rmContext != null) { - RMStateStore store = rmContext.getStateStore(); - try { - store.close(); - } catch (Exception e) { - LOG.error("Error closing store.", e); - } - } - super.serviceStop(); } From 75c31df9dd1d9435aec4cc245e1e58ca326e6159 Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Tue, 10 Sep 2013 18:31:50 +0000 Subject: [PATCH 144/153] MAPREDUCE-5020. Compile failure with JDK8 (Trevor Robinson via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521576 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 2 ++ .../org/apache/hadoop/mapreduce/lib/partition/InputSampler.java | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 13f0079fc3c..24e7da1a7ba 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -256,6 +256,8 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5414. TestTaskAttempt fails in JDK7 with NPE (Nemon Lou via devaraj) + MAPREDUCE-5020. 
Compile failure with JDK8 (Trevor Robinson via tgraves) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java index 742316875ce..e709a2823aa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/partition/InputSampler.java @@ -317,7 +317,7 @@ public class InputSampler extends Configured implements Tool { final InputFormat inf = ReflectionUtils.newInstance(job.getInputFormatClass(), conf); int numPartitions = job.getNumReduceTasks(); - K[] samples = sampler.getSample(inf, job); + K[] samples = (K[])sampler.getSample(inf, job); LOG.info("Using " + samples.length + " samples"); RawComparator comparator = (RawComparator) job.getSortComparator(); From 613979c8fdacf25fd563395ecc399c4de94d3ee7 Mon Sep 17 00:00:00 2001 From: Jing Zhao Date: Tue, 10 Sep 2013 19:29:45 +0000 Subject: [PATCH 145/153] HDFS-5085. Refactor o.a.h.nfs to support different types of authentications. Contributed by Jing Zhao. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521601 13f79535-47bb-0310-9956-ffa450edef68 --- .../apache/hadoop/mount/MountResponse.java | 4 +- .../nfs/{security => }/AccessPrivilege.java | 2 +- .../hadoop/nfs/{security => }/NfsExports.java | 2 +- .../apache/hadoop/nfs/nfs3/IdUserGroup.java | 4 +- .../apache/hadoop/nfs/nfs3/Nfs3Constant.java | 3 + .../apache/hadoop/nfs/nfs3/Nfs3Interface.java | 104 +++++---- .../hadoop/oncrpc/RpcAcceptedReply.java | 6 +- .../org/apache/hadoop/oncrpc/RpcCall.java | 21 +- .../apache/hadoop/oncrpc/RpcDeniedReply.java | 2 +- .../java/org/apache/hadoop/oncrpc/XDR.java | 2 +- .../hadoop/oncrpc/security/Credentials.java | 53 +++++ .../CredentialsGSS.java} | 42 ++-- .../oncrpc/security/CredentialsNone.java | 43 ++++ .../oncrpc/security/CredentialsSys.java | 114 +++++++++ .../oncrpc/{ => security}/RpcAuthInfo.java | 27 +-- .../oncrpc/security/SecurityHandler.java | 63 +++++ .../oncrpc/security/SysSecurityHandler.java | 59 +++++ .../hadoop/oncrpc/security/Verifier.java | 49 ++++ .../hadoop/oncrpc/security/VerifierGSS.java | 41 ++++ .../hadoop/oncrpc/security/VerifierNone.java | 41 ++++ .../apache/hadoop/portmap/PortmapRequest.java | 14 +- .../nfs/{security => }/TestNfsExports.java | 4 +- .../hadoop/oncrpc/TestRpcAcceptedReply.java | 5 +- .../org/apache/hadoop/oncrpc/TestRpcCall.java | 10 +- .../TestCredentialsSys.java} | 37 +-- .../{ => security}/TestRpcAuthInfo.java | 16 +- .../hdfs/nfs/mount/RpcProgramMountd.java | 4 +- .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java | 217 +++++++++--------- .../apache/hadoop/hdfs/nfs/TestMountd.java | 2 +- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 + 30 files changed, 739 insertions(+), 255 deletions(-) rename hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/{security => }/AccessPrivilege.java (95%) rename hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/{security => }/NfsExports.java (99%) create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java rename 
hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/{RpcAuthSys.java => security/CredentialsGSS.java} (56%) create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java rename hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/{ => security}/RpcAuthInfo.java (75%) create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java create mode 100644 hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java rename hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/{security => }/TestNfsExports.java (98%) rename hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/{TestRpcAuthSys.java => security/TestCredentialsSys.java} (58%) rename hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/{ => security}/TestRpcAuthInfo.java (78%) diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java index 3839acc1966..a9131e3b509 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountResponse.java @@ -19,10 +19,10 @@ package org.apache.hadoop.mount; import java.util.List; -import org.apache.hadoop.nfs.security.NfsExports; +import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.XDR; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; /** * Helper class for sending MountResponse diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/AccessPrivilege.java similarity index 95% rename from hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/AccessPrivilege.java index 8789ecfb4e3..073d08d567a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/AccessPrivilege.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/AccessPrivilege.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.nfs.security; +package org.apache.hadoop.nfs; public enum AccessPrivilege { READ_ONLY, diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java similarity index 99% rename from hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java index 301f2f0ff72..afe630b9b03 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/security/NfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/NfsExports.java @@ -15,7 +15,7 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.nfs.security; +package org.apache.hadoop.nfs; import java.net.InetAddress; import java.util.ArrayList; diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java index b5c5aa3789d..c0be5dd2890 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/IdUserGroup.java @@ -147,7 +147,7 @@ public class IdUserGroup { synchronized public String getUserName(int uid, String unknown) { checkAndUpdateMaps(); - String uname = uidNameMap.get(Integer.valueOf(uid)); + String uname = uidNameMap.get(uid); if (uname == null) { uname = unknown; } @@ -156,7 +156,7 @@ public class IdUserGroup { synchronized public String getGroupName(int gid, String unknown) { checkAndUpdateMaps(); - String gname = gidNameMap.get(Integer.valueOf(gid)); + String gname = gidNameMap.get(gid); if (gname == null) { gname = unknown; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java index 706c99f47c4..bedb58f0c4a 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Constant.java @@ -205,4 +205,7 @@ public class Nfs3Constant { public static final String FILE_DUMP_DIR_DEFAULT = "/tmp/.hdfs-nfs"; public static final String ENABLE_FILE_DUMP_KEY = "dfs.nfs3.enableDump"; public static final boolean ENABLE_FILE_DUMP_DEFAULT = true; + + public final static String UNKNOWN_USER = "nobody"; + public final static String UNKNOWN_GROUP = "nobody"; } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java index 678631174dd..8486e2ae50e 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Interface.java @@ -20,67 +20,83 @@ package org.apache.hadoop.nfs.nfs3; import java.net.InetAddress; import org.apache.hadoop.nfs.nfs3.response.NFS3Response; -import org.apache.hadoop.oncrpc.RpcAuthSys; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.SecurityHandler; import org.jboss.netty.channel.Channel; /** * RPC 
procedures as defined in RFC 1813. */ public interface Nfs3Interface { - + /** NULL: Do nothing */ public NFS3Response nullProcedure(); - + /** GETATTR: Get file attributes */ - public NFS3Response getattr(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response getattr(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** SETATTR: Set file attributes */ - public NFS3Response setattr(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response setattr(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** LOOKUP: Lookup filename */ - public NFS3Response lookup(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** ACCESS: Check access permission */ - public NFS3Response access(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response lookup(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** ACCESS: Check access permission */ + public NFS3Response access(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** READ: Read from file */ - public NFS3Response read(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response read(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** WRITE: Write to file */ public NFS3Response write(XDR xdr, Channel channel, int xid, - RpcAuthSys authSys, InetAddress client); - - /** CREATE: Create a file */ - public NFS3Response create(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** MKDIR: Create a directory */ - public NFS3Response mkdir(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** REMOVE: Remove a file */ - public NFS3Response remove(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** RMDIR: Remove a directory */ - public NFS3Response rmdir(XDR xdr, RpcAuthSys authSys, InetAddress client); - + SecurityHandler securityHandler, InetAddress client); + + /** CREATE: Create a file */ + public NFS3Response create(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** MKDIR: Create a directory */ + public NFS3Response mkdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** REMOVE: Remove a file */ + public NFS3Response remove(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** RMDIR: Remove a directory */ + public NFS3Response rmdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** RENAME: Rename a file or directory */ - public NFS3Response rename(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** SYMLINK: Create a symbolic link */ - public NFS3Response symlink(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response rename(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** SYMLINK: Create a symbolic link */ + public NFS3Response symlink(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** READDIR: Read From directory */ - public NFS3Response readdir(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** FSSTAT: Get dynamic file system information */ - public NFS3Response fsstat(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response readdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** FSSTAT: Get dynamic file system information */ + public NFS3Response fsstat(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + /** FSINFO: Get static file system information */ - public NFS3Response fsinfo(XDR xdr, RpcAuthSys authSys, InetAddress client); - + public NFS3Response fsinfo(XDR xdr, 
SecurityHandler securityHandler, + InetAddress client); + /** PATHCONF: Retrieve POSIX information */ - public NFS3Response pathconf(XDR xdr, RpcAuthSys authSys, InetAddress client); - - /** COMMIT: Commit cached data on a server to stable storage */ - public NFS3Response commit(XDR xdr, RpcAuthSys authSys, InetAddress client); + public NFS3Response pathconf(XDR xdr, SecurityHandler securityHandler, + InetAddress client); + + /** COMMIT: Commit cached data on a server to stable storage */ + public NFS3Response commit(XDR xdr, SecurityHandler securityHandler, + InetAddress client); } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java index b6bd5e7c7b9..c909c283d7d 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAcceptedReply.java @@ -17,7 +17,9 @@ */ package org.apache.hadoop.oncrpc; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; /** * Represents RPC message MSG_ACCEPTED reply body. See RFC 1831 for details. @@ -54,7 +56,7 @@ public class RpcAcceptedReply extends RpcReply { public static RpcAcceptedReply read(int xid, RpcMessage.Type messageType, ReplyState replyState, XDR xdr) { - RpcAuthInfo verifier = RpcAuthInfo.read(xdr); + Verifier verifier = Verifier.readFlavorAndVerifier(xdr); AcceptState acceptState = AcceptState.fromValue(xdr.readInt()); return new RpcAcceptedReply(xid, messageType, replyState, verifier, acceptState); diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java index ed0f111602c..4c872da8557 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcCall.java @@ -19,6 +19,8 @@ package org.apache.hadoop.oncrpc; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.Verifier; /** * Represents an RPC message of type RPC call as defined in RFC 1831 @@ -30,11 +32,12 @@ public class RpcCall extends RpcMessage { private final int program; private final int version; private final int procedure; - private final RpcAuthInfo credential; - private final RpcAuthInfo verifier; + private final Credentials credential; + private final Verifier verifier; - protected RpcCall(int xid, RpcMessage.Type messageType, int rpcVersion, int program, - int version, int procedure, RpcAuthInfo credential, RpcAuthInfo verifier) { + protected RpcCall(int xid, RpcMessage.Type messageType, int rpcVersion, + int program, int version, int procedure, Credentials credential, + Verifier verifier) { super(xid, messageType); this.rpcVersion = rpcVersion; this.program = program; @@ -79,19 +82,19 @@ public class RpcCall extends RpcMessage { return procedure; } - public RpcAuthInfo getCredential() { + public Credentials getCredential() { return credential; } - public RpcAuthInfo getVerifier() { + public Verifier getVerifier() { return verifier; } public static RpcCall read(XDR 
xdr) { return new RpcCall(xdr.readInt(), RpcMessage.Type.fromValue(xdr.readInt()), - xdr.readInt(), xdr.readInt(), - xdr.readInt(), xdr.readInt(), RpcAuthInfo.read(xdr), - RpcAuthInfo.read(xdr)); + xdr.readInt(), xdr.readInt(), xdr.readInt(), xdr.readInt(), + Credentials.readFlavorAndCredentials(xdr), + Verifier.readFlavorAndVerifier(xdr)); } public static void write(XDR out, int xid, int program, int progVersion, diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java index 4b91edb397f..8a9af096d1f 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcDeniedReply.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.oncrpc; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; /** * Represents RPC message MSG_DENIED reply body. See RFC 1831 for details. diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java index 40633e286d8..c2f3d06b996 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/XDR.java @@ -280,7 +280,7 @@ public class XDR { public byte[] readVariableOpaque() { int size = this.readInt(); - return size != 0 ? this.readFixedOpaque(size) : null; + return size != 0 ? this.readFixedOpaque(size) : new byte[0]; } public void skipVariableOpaque() { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java new file mode 100644 index 00000000000..8f641d885aa --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Credentials.java @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.oncrpc.XDR; + +/** + * Base class for all credentials. Currently we only support 3 different types + * of auth flavors: AUTH_NONE, AUTH_SYS, and RPCSEC_GSS. 
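For reference, the opaque_auth structure these classes parse has a fixed shape on the wire (RFC 1831: a flavor word, a length word, then at most 400 bytes of body); the flavor numbers below are the standard values behind the AuthFlavor enum used here:

    flavor (4 bytes) | body length (4 bytes) | body (0..400 bytes)

    AUTH_NONE  (0): length = 0, empty body               -> CredentialsNone
    AUTH_SYS   (1): stamp, machine name, uid, gid,
                    up to 16 auxiliary gids              -> CredentialsSys
    RPCSEC_GSS (6): GSS token, left as a stub for now    -> CredentialsGSS
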
+ */ +public abstract class Credentials extends RpcAuthInfo { + public static final Log LOG = LogFactory.getLog(Credentials.class); + + public static Credentials readFlavorAndCredentials(XDR xdr) { + AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt()); + final Credentials credentials; + if(flavor == AuthFlavor.AUTH_NONE) { + credentials = new CredentialsNone(); + } else if(flavor == AuthFlavor.AUTH_SYS) { + credentials = new CredentialsSys(); + } else if(flavor == AuthFlavor.RPCSEC_GSS) { + credentials = new CredentialsGSS(); + } else { + throw new UnsupportedOperationException("Unsupported Credentials Flavor " + + flavor); + } + credentials.read(xdr); + return credentials; + } + + protected int mCredentialsLength; + + protected Credentials(AuthFlavor flavor) { + super(flavor); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthSys.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsGSS.java similarity index 56% rename from hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthSys.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsGSS.java index dbedb3649e4..0dd80dc0895 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthSys.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsGSS.java @@ -15,37 +15,27 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.oncrpc; +package org.apache.hadoop.oncrpc.security; -/** - * AUTH_SYS as defined in RFC 1831 - */ -public class RpcAuthSys { - private final int uid; - private final int gid; +import org.apache.hadoop.oncrpc.XDR; - public RpcAuthSys(int uid, int gid) { - this.uid = uid; - this.gid = gid; - } - - public static RpcAuthSys from(byte[] credentials) { - XDR sys = new XDR(credentials); - sys.skip(4); // Stamp - sys.skipVariableOpaque(); // Machine name - return new RpcAuthSys(sys.readInt(), sys.readInt()); - } - - public int getUid() { - return uid; - } +/** Credential used by RPCSEC_GSS */ +public class CredentialsGSS extends Credentials { - public int getGid() { - return gid; + public CredentialsGSS() { + super(AuthFlavor.RPCSEC_GSS); } @Override - public String toString() { - return "(AuthSys: uid=" + uid + " gid=" + gid + ")"; + public void read(XDR xdr) { + // TODO Auto-generated method stub + } + + @Override + public void write(XDR xdr) { + // TODO Auto-generated method stub + + } + } diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java new file mode 100644 index 00000000000..753edba49fb --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsNone.java @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.oncrpc.XDR; + +import com.google.common.base.Preconditions; + +/** Credential used by AUTH_NONE */ +public class CredentialsNone extends Credentials { + + public CredentialsNone() { + super(AuthFlavor.AUTH_NONE); + mCredentialsLength = 0; + } + + @Override + public void read(XDR xdr) { + mCredentialsLength = xdr.readInt(); + Preconditions.checkState(mCredentialsLength == 0); + } + + @Override + public void write(XDR xdr) { + Preconditions.checkState(mCredentialsLength == 0); + xdr.writeInt(mCredentialsLength); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java new file mode 100644 index 00000000000..9ba12b8990e --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/CredentialsSys.java @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.oncrpc.security; + +import java.net.InetAddress; +import java.net.UnknownHostException; + +import org.apache.hadoop.oncrpc.XDR; + +/** Credential used by AUTH_SYS */ +public class CredentialsSys extends Credentials { + + private static final String HOSTNAME; + static { + try { + String s = InetAddress.getLocalHost().getHostName(); + HOSTNAME = s; + if(LOG.isDebugEnabled()) { + LOG.debug("HOSTNAME = " + HOSTNAME); + } + } catch (UnknownHostException e) { + LOG.error("Error setting HOSTNAME", e); + throw new RuntimeException(e); + } + } + + protected int mUID, mGID; + protected int[] mAuxGIDs; + protected String mHostName; + protected int mStamp; + + public CredentialsSys() { + super(AuthFlavor.AUTH_SYS); + this.mCredentialsLength = 0; + this.mHostName = HOSTNAME; + } + + public int getGID() { + return mGID; + } + + public int getUID() { + return mUID; + } + + public void setGID(int gid) { + this.mGID = gid; + } + + public void setUID(int uid) { + this.mUID = uid; + } + + public void setStamp(int stamp) { + this.mStamp = stamp; + } + + @Override + public void read(XDR xdr) { + mCredentialsLength = xdr.readInt(); + + mStamp = xdr.readInt(); + mHostName = xdr.readString(); + mUID = xdr.readInt(); + mGID = xdr.readInt(); + + int length = xdr.readInt(); + mAuxGIDs = new int[length]; + for (int i = 0; i < length; i++) { + mAuxGIDs[i] = xdr.readInt(); + } + } + + @Override + public void write(XDR xdr) { + // mStamp + mHostName.length + mHostName + mUID + mGID + mAuxGIDs.count + mCredentialsLength = 20 + mHostName.getBytes().length; + // mAuxGIDs + if (mAuxGIDs != null && mAuxGIDs.length > 0) { + mCredentialsLength += mAuxGIDs.length * 4; + } + xdr.writeInt(mCredentialsLength); + + xdr.writeInt(mStamp); + xdr.writeString(mHostName); + xdr.writeInt(mUID); + xdr.writeInt(mGID); + + if((mAuxGIDs == null) || (mAuxGIDs.length == 0)) { + xdr.writeInt(0); + } else { + xdr.writeInt(mAuxGIDs.length); + for (int i = 0; i < mAuxGIDs.length; i++) { + xdr.writeInt(mAuxGIDs[i]); + } + } + } + +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthInfo.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/RpcAuthInfo.java similarity index 75% rename from hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthInfo.java rename to hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/RpcAuthInfo.java index a507d0d20de..65ebd304a25 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/RpcAuthInfo.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/RpcAuthInfo.java @@ -15,14 +15,14 @@ * See the License for the specific language governing permissions and * limitations under the License. */ -package org.apache.hadoop.oncrpc; +package org.apache.hadoop.oncrpc.security; -import java.util.Arrays; +import org.apache.hadoop.oncrpc.XDR; /** - * Authentication Info as defined in RFC 1831 + * Authentication Info. Base class of Verifier and Credential. 
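Taken together, the new security package forms a small hierarchy, all of it visible in this patch:

    RpcAuthInfo  (abstract; getFlavor(), read(XDR), write(XDR))
      +-- Credentials  -> CredentialsNone, CredentialsSys, CredentialsGSS (stub)
      +-- Verifier     -> VerifierNone, VerifierGSS
    SecurityHandler (abstract) -> SysSecurityHandler, built from a CredentialsSys
                                  plus an IdUserGroup for uid/gid lookups
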
*/ -public class RpcAuthInfo { +public abstract class RpcAuthInfo { /** Different types of authentication as defined in RFC 1831 */ public enum AuthFlavor { AUTH_NONE(0), @@ -52,27 +52,20 @@ public class RpcAuthInfo { } private final AuthFlavor flavor; - private final byte[] body; - protected RpcAuthInfo(AuthFlavor flavor, byte[] body) { + protected RpcAuthInfo(AuthFlavor flavor) { this.flavor = flavor; - this.body = body; } - public static RpcAuthInfo read(XDR xdr) { - int type = xdr.readInt(); - AuthFlavor flavor = AuthFlavor.fromValue(type); - byte[] body = xdr.readVariableOpaque(); - return new RpcAuthInfo(flavor, body); - } + /** Load auth info */ + public abstract void read(XDR xdr); + + /** Write auth info */ + public abstract void write(XDR xdr); public AuthFlavor getFlavor() { return flavor; } - - public byte[] getBody() { - return Arrays.copyOf(body, body.length); - } @Override public String toString() { diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java new file mode 100644 index 00000000000..40004d0e786 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SecurityHandler.java @@ -0,0 +1,63 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.oncrpc.security; + +import java.io.IOException; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.oncrpc.RpcCall; +import org.apache.hadoop.oncrpc.XDR; + +public abstract class SecurityHandler { + public static final Log LOG = LogFactory.getLog(SecurityHandler.class); + + public abstract String getUser(); + + public abstract boolean shouldSilentlyDrop(RpcCall request); + + public abstract Verifier getVerifer(RpcCall request) throws IOException; + + public boolean isUnwrapRequired() { + return false; + } + + public boolean isWrapRequired() { + return false; + } + + /** Used by GSS */ + public XDR unwrap(RpcCall request, byte[] data ) throws IOException { + throw new UnsupportedOperationException(); + } + + /** Used by GSS */ + public byte[] wrap(RpcCall request, XDR response) throws IOException { + throw new UnsupportedOperationException(); + } + + /** Used by AUTH_SYS */ + public int getUid() { + throw new UnsupportedOperationException(); + } + + /** Used by AUTH_SYS */ + public int getGid() { + throw new UnsupportedOperationException(); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java new file mode 100644 index 00000000000..196d3d8cbba --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/SysSecurityHandler.java @@ -0,0 +1,59 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.nfs.nfs3.IdUserGroup; +import org.apache.hadoop.nfs.nfs3.Nfs3Constant; +import org.apache.hadoop.oncrpc.RpcCall; + +public class SysSecurityHandler extends SecurityHandler { + + private final IdUserGroup iug; + private final CredentialsSys mCredentialsSys; + + public SysSecurityHandler(CredentialsSys credentialsSys, + IdUserGroup iug) { + this.mCredentialsSys = credentialsSys; + this.iug = iug; + } + + @Override + public String getUser() { + return iug.getUserName(mCredentialsSys.getUID(), Nfs3Constant.UNKNOWN_USER); + } + + @Override + public boolean shouldSilentlyDrop(RpcCall request) { + return false; + } + + @Override + public VerifierNone getVerifer(RpcCall request) { + return new VerifierNone(); + } + + @Override + public int getUid() { + return mCredentialsSys.getUID(); + } + + @Override + public int getGid() { + return mCredentialsSys.getGID(); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java new file mode 100644 index 00000000000..f8344b2800d --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/Verifier.java @@ -0,0 +1,49 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; + +/** + * Base class for verifier. Currently we only support 3 types of auth flavors: + * {@link AuthFlavor#AUTH_NONE}, {@link AuthFlavor#AUTH_SYS}, + * and {@link AuthFlavor#RPCSEC_GSS}. 
+ */ +public abstract class Verifier extends RpcAuthInfo { + + protected Verifier(AuthFlavor flavor) { + super(flavor); + } + + public static Verifier readFlavorAndVerifier(XDR xdr) { + AuthFlavor flavor = AuthFlavor.fromValue(xdr.readInt()); + final Verifier verifer; + if(flavor == AuthFlavor.AUTH_NONE) { + verifer = new VerifierNone(); + } else if(flavor == AuthFlavor.RPCSEC_GSS) { + verifer = new VerifierGSS(); + } else { + throw new UnsupportedOperationException("Unsupported verifier flavor" + + flavor); + } + verifer.read(xdr); + return verifer; + } + +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java new file mode 100644 index 00000000000..7410070b127 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierGSS.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.oncrpc.XDR; + +/** Verifier mapped to RPCSEC_GSS. */ +public class VerifierGSS extends Verifier { + + public VerifierGSS() { + super(AuthFlavor.RPCSEC_GSS); + } + + @Override + public void read(XDR xdr) { + // TODO Auto-generated method stub + + } + + @Override + public void write(XDR xdr) { + // TODO Auto-generated method stub + + } + +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java new file mode 100644 index 00000000000..8bccd1b9be2 --- /dev/null +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/oncrpc/security/VerifierNone.java @@ -0,0 +1,41 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.oncrpc.security; + +import org.apache.hadoop.oncrpc.XDR; + +import com.google.common.base.Preconditions; + +/** Verifier used by AUTH_NONE. */ +public class VerifierNone extends Verifier { + + public VerifierNone() { + super(AuthFlavor.AUTH_NONE); + } + + @Override + public void read(XDR xdr) { + int length = xdr.readInt(); + Preconditions.checkState(length == 0); + } + + @Override + public void write(XDR xdr) { + xdr.writeInt(0); + } +} diff --git a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java index 11da7d44dc2..98dd1ffa635 100644 --- a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java +++ b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/portmap/PortmapRequest.java @@ -17,10 +17,14 @@ */ package org.apache.hadoop.portmap; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcUtil; import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsNone; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.VerifierNone; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.portmap.PortmapInterface.Procedure; /** @@ -38,9 +42,11 @@ public class PortmapRequest { RpcProgramPortmap.PROGRAM, RpcProgramPortmap.VERSION, Procedure.PMAPPROC_SET.getValue()); request.writeInt(AuthFlavor.AUTH_NONE.getValue()); - request.writeInt(0); - request.writeInt(0); - request.writeInt(0); + Credentials credential = new CredentialsNone(); + credential.write(request); + request.writeInt(AuthFlavor.AUTH_NONE.getValue()); + Verifier verifier = new VerifierNone(); + verifier.write(request); return mapping.serialize(request); } } diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java similarity index 98% rename from hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java rename to hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java index dbadd8ba339..9acb29e589a 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/security/TestNfsExports.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/nfs/TestNfsExports.java @@ -15,10 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.nfs.security; +package org.apache.hadoop.nfs; import junit.framework.Assert; +import org.apache.hadoop.nfs.AccessPrivilege; +import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.nfs3.Nfs3Constant; import org.junit.Test; diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java index fca0ff5a7f8..2daa48cce83 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAcceptedReply.java @@ -20,8 +20,9 @@ package org.apache.hadoop.oncrpc; import static org.junit.Assert.assertEquals; import org.apache.hadoop.oncrpc.RpcAcceptedReply.AcceptState; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.oncrpc.RpcReply.ReplyState; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.junit.Test; /** @@ -45,7 +46,7 @@ public class TestRpcAcceptedReply { @Test public void testConstructor() { - RpcAuthInfo verifier = new RpcAuthInfo(AuthFlavor.AUTH_NONE, new byte[0]); + Verifier verifier = new VerifierNone(); RpcAcceptedReply reply = new RpcAcceptedReply(0, RpcMessage.Type.RPC_REPLY, ReplyState.MSG_ACCEPTED, verifier, AcceptState.SUCCESS); assertEquals(0, reply.getXid()); diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java index eb168034524..b35931776d5 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcCall.java @@ -17,8 +17,12 @@ */ package org.apache.hadoop.oncrpc; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; import static org.junit.Assert.assertEquals; + +import org.apache.hadoop.oncrpc.security.CredentialsNone; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.VerifierNone; import org.junit.Test; /** @@ -28,8 +32,8 @@ public class TestRpcCall { @Test public void testConstructor() { - RpcAuthInfo credential = new RpcAuthInfo(AuthFlavor.AUTH_NONE, new byte[0]); - RpcAuthInfo verifier = new RpcAuthInfo(AuthFlavor.AUTH_NONE, new byte[0]); + Credentials credential = new CredentialsNone(); + Verifier verifier = new VerifierNone(); int rpcVersion = RpcCall.RPC_VERSION; int program = 2; int version = 3; diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthSys.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java similarity index 58% rename from hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthSys.java rename to hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java index 474a1f73789..0fd183e782d 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthSys.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestCredentialsSys.java @@ -15,31 +15,32 @@ * See the License for the specific language governing permissions and * limitations under the 
License. */ -package org.apache.hadoop.oncrpc; +package org.apache.hadoop.oncrpc.security; import static org.junit.Assert.assertEquals; +import org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsSys; import org.junit.Test; /** - * Test for {@link RpcAuthSys} + * Test for {@link CredentialsSys} */ -public class TestRpcAuthSys { +public class TestCredentialsSys { + @Test - public void testConstructor() { - RpcAuthSys auth = new RpcAuthSys(0, 1); - assertEquals(0, auth.getUid()); - assertEquals(1, auth.getGid()); - } - - @Test - public void testRead() { - byte[] bytes = {0, 1, 2, 3}; // 4 bytes Stamp - bytes = XDR.append(bytes, XDR.getVariableOpque(new byte[0])); - bytes = XDR.append(bytes, XDR.toBytes(0)); // gid - bytes = XDR.append(bytes, XDR.toBytes(1)); // uid - RpcAuthSys auth = RpcAuthSys.from(bytes); - assertEquals(0, auth.getUid()); - assertEquals(1, auth.getGid()); + public void testReadWrite() { + CredentialsSys credential = new CredentialsSys(); + credential.setUID(0); + credential.setGID(1); + + XDR xdr = new XDR(); + credential.write(xdr); + + CredentialsSys newCredential = new CredentialsSys(); + newCredential.read(xdr); + + assertEquals(0, newCredential.getUID()); + assertEquals(1, newCredential.getGID()); } } diff --git a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthInfo.java b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestRpcAuthInfo.java similarity index 78% rename from hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthInfo.java rename to hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestRpcAuthInfo.java index 0b8240bc605..755a6eb9497 100644 --- a/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/TestRpcAuthInfo.java +++ b/hadoop-common-project/hadoop-nfs/src/test/java/org/apache/hadoop/oncrpc/security/TestRpcAuthInfo.java @@ -15,14 +15,12 @@ * See the License for the specific language governing permissions and * limitations under the License. 
*/ -package org.apache.hadoop.oncrpc; +package org.apache.hadoop.oncrpc.security; import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import java.util.Arrays; - -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; import org.junit.Test; /** @@ -42,12 +40,4 @@ public class TestRpcAuthInfo { public void testInvalidAuthFlavor() { assertEquals(AuthFlavor.AUTH_NONE, AuthFlavor.fromValue(4)); } - - @Test - public void testConsturctor() { - byte[] body = new byte[0]; - RpcAuthInfo auth = new RpcAuthInfo(AuthFlavor.AUTH_NONE, body); - assertEquals(AuthFlavor.AUTH_NONE, auth.getFlavor()); - assertTrue(Arrays.equals(body, auth.getBody())); - } } diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java index 5b5ea511d19..1ff7f3f45b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/mount/RpcProgramMountd.java @@ -32,10 +32,10 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode; import org.apache.hadoop.mount.MountEntry; import org.apache.hadoop.mount.MountInterface; import org.apache.hadoop.mount.MountResponse; +import org.apache.hadoop.nfs.AccessPrivilege; +import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.Nfs3Status; -import org.apache.hadoop.nfs.security.AccessPrivilege; -import org.apache.hadoop.nfs.security.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcProgram; diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java index 404cf3e73ca..b935119b6be 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/main/java/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java @@ -42,6 +42,8 @@ import org.apache.hadoop.hdfs.protocol.DirectoryListing; import org.apache.hadoop.hdfs.protocol.HdfsConstants; import org.apache.hadoop.hdfs.protocol.HdfsFileStatus; import org.apache.hadoop.hdfs.server.namenode.NameNode; +import org.apache.hadoop.nfs.AccessPrivilege; +import org.apache.hadoop.nfs.NfsExports; import org.apache.hadoop.nfs.NfsTime; import org.apache.hadoop.nfs.nfs3.FileHandle; import org.apache.hadoop.nfs.nfs3.IdUserGroup; @@ -96,16 +98,18 @@ import org.apache.hadoop.nfs.nfs3.response.VoidResponse; import org.apache.hadoop.nfs.nfs3.response.WRITE3Response; import org.apache.hadoop.nfs.nfs3.response.WccAttr; import org.apache.hadoop.nfs.nfs3.response.WccData; -import org.apache.hadoop.nfs.security.AccessPrivilege; -import org.apache.hadoop.nfs.security.NfsExports; import org.apache.hadoop.oncrpc.RpcAcceptedReply; -import org.apache.hadoop.oncrpc.RpcAuthInfo.AuthFlavor; -import org.apache.hadoop.oncrpc.RpcAuthSys; import org.apache.hadoop.oncrpc.RpcCall; import org.apache.hadoop.oncrpc.RpcDeniedReply; import org.apache.hadoop.oncrpc.RpcProgram; import org.apache.hadoop.oncrpc.RpcReply; import 
org.apache.hadoop.oncrpc.XDR; +import org.apache.hadoop.oncrpc.security.CredentialsSys; +import org.apache.hadoop.oncrpc.security.Credentials; +import org.apache.hadoop.oncrpc.security.Verifier; +import org.apache.hadoop.oncrpc.security.SecurityHandler; +import org.apache.hadoop.oncrpc.security.SysSecurityHandler; +import org.apache.hadoop.oncrpc.security.RpcAuthInfo.AuthFlavor; import org.apache.hadoop.security.AccessControlException; import org.jboss.netty.channel.Channel; @@ -205,8 +209,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public GETATTR3Response getattr(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public GETATTR3Response getattr(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { GETATTR3Response response = new GETATTR3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -214,8 +218,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -268,9 +271,9 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { if (updateFields.contains(SetAttrField.UID) || updateFields.contains(SetAttrField.GID)) { String uname = updateFields.contains(SetAttrField.UID) ? iug.getUserName( - newAttr.getUid(), UNKNOWN_USER) : null; + newAttr.getUid(), Nfs3Constant.UNKNOWN_USER) : null; String gname = updateFields.contains(SetAttrField.GID) ? iug - .getGroupName(newAttr.getGid(), UNKNOWN_GROUP) : null; + .getGroupName(newAttr.getGid(), Nfs3Constant.UNKNOWN_GROUP) : null; dfsClient.setOwner(fileIdPath, uname, gname); } @@ -287,11 +290,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public SETATTR3Response setattr(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public SETATTR3Response setattr(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { SETATTR3Response response = new SETATTR3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -364,7 +366,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public LOOKUP3Response lookup(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public LOOKUP3Response lookup(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { LOOKUP3Response response = new LOOKUP3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -372,8 +375,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -426,7 +428,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public ACCESS3Response access(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public ACCESS3Response access(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { 
ACCESS3Response response = new ACCESS3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -434,8 +437,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -464,8 +466,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { LOG.error("Can't get path for fileId:" + handle.getFileId()); return new ACCESS3Response(Nfs3Status.NFS3ERR_STALE); } - int access = Nfs3Utils.getAccessRightsForUserGroup(authSys.getUid(), - authSys.getGid(), attrs); + int access = Nfs3Utils.getAccessRightsForUserGroup( + securityHandler.getUid(), securityHandler.getGid(), attrs); return new ACCESS3Response(Nfs3Status.NFS3_OK, attrs, access); } catch (IOException e) { @@ -474,13 +476,14 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public READLINK3Response readlink(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public READLINK3Response readlink(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { return new READLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); } @Override - public READ3Response read(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public READ3Response read(XDR xdr, SecurityHandler securityHandler, + InetAddress client) { READ3Response response = new READ3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -488,8 +491,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -534,8 +536,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } return new READ3Response(Nfs3Status.NFS3ERR_NOENT); } - int access = Nfs3Utils.getAccessRightsForUserGroup(authSys.getUid(), - authSys.getGid(), attrs); + int access = Nfs3Utils.getAccessRightsForUserGroup( + securityHandler.getUid(), securityHandler.getGid(), attrs); if ((access & Nfs3Constant.ACCESS3_READ) != 0) { eof = offset < attrs.getSize() ? 
false : true; return new READ3Response(Nfs3Status.NFS3_OK, attrs, 0, eof, @@ -578,10 +580,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { @Override public WRITE3Response write(XDR xdr, Channel channel, int xid, - RpcAuthSys authSys, InetAddress client) { + SecurityHandler securityHandler, InetAddress client) { WRITE3Response response = new WRITE3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -653,10 +655,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public CREATE3Response create(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public CREATE3Response create(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { CREATE3Response response = new CREATE3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -725,7 +727,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { // Set group if it's not specified in the request. if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) { setAttr3.getUpdateFields().add(SetAttrField.GID); - setAttr3.setGid(authSys.getGid()); + setAttr3.setGid(securityHandler.getGid()); } setattrInternal(dfsClient, fileIdPath, setAttr3, false); } @@ -776,10 +778,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public MKDIR3Response mkdir(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public MKDIR3Response mkdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client) { MKDIR3Response response = new MKDIR3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -834,7 +836,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { // Set group if it's not specified in the request. 
if (!setAttr3.getUpdateFields().contains(SetAttrField.GID)) { setAttr3.getUpdateFields().add(SetAttrField.GID); - setAttr3.setGid(authSys.getGid()); + setAttr3.setGid(securityHandler.getGid()); } setattrInternal(dfsClient, fileIdPath, setAttr3, false); @@ -866,15 +868,16 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } } - public READDIR3Response mknod(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public READDIR3Response mknod(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } @Override - public REMOVE3Response remove(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public REMOVE3Response remove(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { REMOVE3Response response = new REMOVE3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -947,10 +950,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public RMDIR3Response rmdir(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public RMDIR3Response rmdir(XDR xdr, SecurityHandler securityHandler, + InetAddress client) { RMDIR3Response response = new RMDIR3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1030,10 +1033,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public RENAME3Response rename(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public RENAME3Response rename(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { RENAME3Response response = new RENAME3Response(Nfs3Status.NFS3_OK); - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1118,18 +1121,18 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public SYMLINK3Response symlink(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public SYMLINK3Response symlink(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { return new SYMLINK3Response(Nfs3Status.NFS3ERR_NOTSUPP); } - public READDIR3Response link(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public READDIR3Response link(XDR xdr, SecurityHandler securityHandler, InetAddress client) { return new READDIR3Response(Nfs3Status.NFS3ERR_NOTSUPP); } @Override - public READDIR3Response readdir(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public READDIR3Response readdir(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { READDIR3Response response = new READDIR3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -1137,8 +1140,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { 
response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1269,14 +1271,13 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { dirStatus.getModificationTime(), dirList); } - public READDIRPLUS3Response readdirplus(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public READDIRPLUS3Response readdirplus(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_ACCES); } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { return new READDIRPLUS3Response(Nfs3Status.NFS3ERR_SERVERFAULT); } @@ -1420,7 +1421,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public FSSTAT3Response fsstat(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public FSSTAT3Response fsstat(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { FSSTAT3Response response = new FSSTAT3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -1428,8 +1430,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1478,7 +1479,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public FSINFO3Response fsinfo(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public FSINFO3Response fsinfo(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { FSINFO3Response response = new FSINFO3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -1486,8 +1488,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1530,8 +1531,8 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public PATHCONF3Response pathconf(XDR xdr, RpcAuthSys authSys, - InetAddress client) { + public PATHCONF3Response pathconf(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { PATHCONF3Response response = new PATHCONF3Response(Nfs3Status.NFS3_OK); if (!checkAccessPrivilege(client, AccessPrivilege.READ_ONLY)) { @@ -1539,8 +1540,7 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { return response; } - String uname = authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1578,10 +1578,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { } @Override - public COMMIT3Response commit(XDR xdr, RpcAuthSys authSys, InetAddress client) { + public COMMIT3Response commit(XDR xdr, + SecurityHandler securityHandler, InetAddress client) { COMMIT3Response response = new COMMIT3Response(Nfs3Status.NFS3_OK); - String uname = 
authSysCheck(authSys); - DFSClient dfsClient = clientCache.get(uname); + DFSClient dfsClient = clientCache.get(securityHandler.getUser()); if (dfsClient == null) { response.setStatus(Nfs3Status.NFS3ERR_SERVERFAULT); return response; @@ -1645,12 +1645,15 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { Nfs3Constant.WRITE_COMMIT_VERF); } } - - private final static String UNKNOWN_USER = "nobody"; - private final static String UNKNOWN_GROUP = "nobody"; - private String authSysCheck(RpcAuthSys authSys) { - return iug.getUserName(authSys.getUid(), UNKNOWN_USER); + private SecurityHandler getSecurityHandler(Credentials credentials, + Verifier verifier) { + if (credentials instanceof CredentialsSys) { + return new SysSecurityHandler((CredentialsSys) credentials, iug); + } else { + // TODO: support GSS and handle other cases + return null; + } } @Override @@ -1658,67 +1661,71 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface { InetAddress client, Channel channel) { final NFSPROC3 nfsproc3 = NFSPROC3.fromValue(rpcCall.getProcedure()); int xid = rpcCall.getXid(); - RpcAuthSys authSys = null; - + + Credentials credentials = rpcCall.getCredential(); // Ignore auth only for NFSPROC3_NULL, especially for Linux clients. if (nfsproc3 != NFSPROC3.NULL) { - if (rpcCall.getCredential().getFlavor() != AuthFlavor.AUTH_SYS) { + if (rpcCall.getCredential().getFlavor() != AuthFlavor.AUTH_SYS + && rpcCall.getCredential().getFlavor() != AuthFlavor.RPCSEC_GSS) { LOG.info("Wrong RPC AUTH flavor, " - + rpcCall.getCredential().getFlavor() + " is not AUTH_SYS."); + + rpcCall.getCredential().getFlavor() + + " is not AUTH_SYS or RPCSEC_GSS."); XDR reply = new XDR(); reply = RpcDeniedReply.voidReply(reply, xid, RpcReply.ReplyState.MSG_ACCEPTED, RpcDeniedReply.RejectState.AUTH_ERROR); return reply; } - authSys = RpcAuthSys.from(rpcCall.getCredential().getBody()); } + SecurityHandler securityHandler = getSecurityHandler(credentials, + rpcCall.getVerifier()); + NFS3Response response = null; if (nfsproc3 == NFSPROC3.NULL) { response = nullProcedure(); } else if (nfsproc3 == NFSPROC3.GETATTR) { - response = getattr(xdr, authSys, client); + response = getattr(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.SETATTR) { - response = setattr(xdr, authSys, client); + response = setattr(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.LOOKUP) { - response = lookup(xdr, authSys, client); + response = lookup(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.ACCESS) { - response = access(xdr, authSys, client); + response = access(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.READLINK) { - response = readlink(xdr, authSys, client); + response = readlink(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.READ) { - response = read(xdr, authSys, client); + response = read(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.WRITE) { - response = write(xdr, channel, xid, authSys, client); + response = write(xdr, channel, xid, securityHandler, client); } else if (nfsproc3 == NFSPROC3.CREATE) { - response = create(xdr, authSys, client); + response = create(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.MKDIR) { - response = mkdir(xdr, authSys, client); + response = mkdir(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.SYMLINK) { - response = symlink(xdr, authSys, client); + response = symlink(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.MKNOD) { - response = 
mknod(xdr, authSys, client); + response = mknod(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.REMOVE) { - response = remove(xdr, authSys, client); + response = remove(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.RMDIR) { - response = rmdir(xdr, authSys, client); + response = rmdir(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.RENAME) { - response = rename(xdr, authSys, client); + response = rename(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.LINK) { - response = link(xdr, authSys, client); + response = link(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.READDIR) { - response = readdir(xdr, authSys, client); + response = readdir(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.READDIRPLUS) { - response = readdirplus(xdr, authSys, client); + response = readdirplus(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.FSSTAT) { - response = fsstat(xdr, authSys, client); + response = fsstat(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.FSINFO) { - response = fsinfo(xdr, authSys, client); + response = fsinfo(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.PATHCONF) { - response = pathconf(xdr, authSys, client); + response = pathconf(xdr, securityHandler, client); } else if (nfsproc3 == NFSPROC3.COMMIT) { - response = commit(xdr, authSys, client); + response = commit(xdr, securityHandler, client); } else { // Invalid procedure RpcAcceptedReply.voidReply(out, xid, diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java index 0e51340e49d..7965a140d7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java +++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/src/test/java/org/apache/hadoop/hdfs/nfs/TestMountd.java @@ -42,7 +42,7 @@ public class TestMountd { // Start minicluster Configuration config = new Configuration(); MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1) - .manageNameDfsDirs(false).build(); + .build(); cluster.waitActive(); // Start nfs diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 735dd2634e4..ee6ff0f922a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -330,6 +330,9 @@ Release 2.1.1-beta - UNRELEASED HDFS-5118. Provide testing support for DFSClient to drop RPC responses. (jing9) + HDFS-5085. Refactor o.a.h.nfs to support different types of + authentications. (jing9) + IMPROVEMENTS HDFS-4513. Clarify in the WebHDFS REST API that all JSON respsonses may From 220b21732b4a08ebf3a51f4d5ba64d0acc7c4b39 Mon Sep 17 00:00:00 2001 From: Jonathan Turner Eagles Date: Tue, 10 Sep 2013 19:53:49 +0000 Subject: [PATCH 146/153] YARN-1119. Add ClusterMetrics checks to tho TestRMNodeTransitions tests (Mit Desai via jeagles) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521611 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../TestRMNodeTransitions.java | 42 +++++++++++++++++++ 2 files changed, 45 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 76365a8b885..646b6b25c55 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -1294,6 +1294,9 @@ Release 0.23.10 - UNRELEASED YARN-985. 
Nodemanager should log where a resource was localized (Ravi Prakash via jeagles) + YARN-1119. Add ClusterMetrics checks to tho TestRMNodeTransitions tests + (Mit Desai via jeagles) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 694d2826bfe..608fa6449b4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -260,7 +260,21 @@ public class TestRMNodeTransitions { @Test public void testRunningExpire() { RMNodeImpl node = getRunningNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.EXPIRE)); + Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost + 1, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.LOST, node.getState()); } @@ -297,8 +311,22 @@ public class TestRMNodeTransitions { @Test public void testRunningDecommission() { RMNodeImpl node = getRunningNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.DECOMMISSION)); + Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", initialLost, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned + 1, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.DECOMMISSIONED, node.getState()); } @@ -327,8 +355,22 @@ public class TestRMNodeTransitions { @Test public void testRunningRebooting() { RMNodeImpl node = getRunningNode(); + ClusterMetrics cm = ClusterMetrics.getMetrics(); + int initialActive = cm.getNumActiveNMs(); + int initialLost = cm.getNumLostNMs(); + int initialUnhealthy = cm.getUnhealthyNMs(); + int initialDecommissioned = cm.getNumDecommisionedNMs(); + int initialRebooted = cm.getNumRebootedNMs(); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.REBOOTING)); + Assert.assertEquals("Active Nodes", initialActive - 1, cm.getNumActiveNMs()); + Assert.assertEquals("Lost Nodes", 
initialLost, cm.getNumLostNMs()); + Assert.assertEquals("Unhealthy Nodes", + initialUnhealthy, cm.getUnhealthyNMs()); + Assert.assertEquals("Decommissioned Nodes", + initialDecommissioned, cm.getNumDecommisionedNMs()); + Assert.assertEquals("Rebooted Nodes", + initialRebooted + 1, cm.getNumRebootedNMs()); Assert.assertEquals(NodeState.REBOOTED, node.getState()); } From b5c6ff164a9a13f04cbe48d1fc14ceb71154525a Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Tue, 10 Sep 2013 22:51:27 +0000 Subject: [PATCH 147/153] YARN-1025. ResourceManager and NodeManager do not load native libraries on Windows. Contributed by Chris Nauroth. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521670 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 +++ hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd | 4 ++++ 2 files changed, 7 insertions(+) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 646b6b25c55..270dd247e6a 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -182,6 +182,9 @@ Release 2.1.1-beta - UNRELEASED data structures thread safe to avoid RM crashing with ArrayIndexOutOfBoundsException. (Zhijie Shen via vinodkv) + YARN-1025. ResourceManager and NodeManager do not load native libraries on + Windows. (cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd index 031cce7bd6b..955df46245b 100644 --- a/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd +++ b/hadoop-yarn-project/hadoop-yarn/bin/yarn.cmd @@ -135,6 +135,10 @@ if "%1" == "--config" ( call :%yarn-command% %yarn-command-arguments% + if defined JAVA_LIBRARY_PATH ( + set YARN_OPTS=%YARN_OPTS% -Djava.library.path=%JAVA_LIBRARY_PATH% + ) + set java_arguments=%JAVA_HEAP_MAX% %YARN_OPTS% -classpath %CLASSPATH% %CLASS% %yarn-command-arguments% call %JAVA% %java_arguments% From 1c1ebc1553650ac8e4486faf21f0d95150f607ad Mon Sep 17 00:00:00 2001 From: Vinod Kumar Vavilapalli Date: Wed, 11 Sep 2013 00:38:49 +0000 Subject: [PATCH 148/153] MAPREDUCE-5497. Changed MRAppMaster to sleep only after doing everything else but just before ClientService to avoid race conditions during RM restart. Contributed by Jian He. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1521699 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 4 + .../hadoop/mapreduce/v2/app/MRAppMaster.java | 26 ++-- .../v2/app/client/ClientService.java | 40 +++--- .../v2/app/client/MRClientService.java | 5 +- .../apache/hadoop/mapreduce/v2/app/MRApp.java | 3 +- .../app/TestMRAppComponentDependencies.java | 121 ++++++++++++++++++ .../mapreduce/v2/app/TestStagingCleanup.java | 33 +---- 7 files changed, 170 insertions(+), 62 deletions(-) create mode 100644 hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 24e7da1a7ba..3dc14de172b 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -186,6 +186,10 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5478. TeraInputFormat unnecessarily defines its own FileSplit subclass (Sandy Ryza) + MAPREDUCE-5497. Changed MRAppMaster to sleep only after doing everything else + but just before ClientService to avoid race conditions during RM restart. 
+ (Jian He via vinodkv) + OPTIMIZATIONS MAPREDUCE-5446. TestJobHistoryEvents and TestJobHistoryParsing have race diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java index 58f4deef3a1..24db757e4f6 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java @@ -362,7 +362,10 @@ public class MRAppMaster extends CompositeService { //service to handle requests from JobClient clientService = createClientService(context); - addIfService(clientService); + // Init ClientService separately so that we stop it separately, since this + // service needs to wait some time before it stops so clients can know the + // final states + clientService.init(conf); containerAllocator = createContainerAllocator(clientService, context); @@ -425,7 +428,6 @@ public class MRAppMaster extends CompositeService { // queued inside the JobHistoryEventHandler addIfService(historyService); } - super.serviceInit(conf); } // end of init() @@ -534,14 +536,6 @@ public class MRAppMaster extends CompositeService { } } - // TODO:currently just wait for some time so clients can know the - // final states. Will be removed once RM come on. - try { - Thread.sleep(5000); - } catch (InterruptedException e) { - e.printStackTrace(); - } - try { //if isLastAMRetry comes as true, should never set it to false if ( !isLastAMRetry){ @@ -556,6 +550,14 @@ public class MRAppMaster extends CompositeService { LOG.info("Calling stop for all the services"); MRAppMaster.this.stop(); + // TODO: Stop ClientService last, since only ClientService should wait for + // some time so clients can know the final states. Will be removed once RM come on. + try { + Thread.sleep(5000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + clientService.stop(); } catch (Throwable t) { LOG.warn("Graceful stop failed ", t); } @@ -1019,8 +1021,10 @@ public class MRAppMaster extends CompositeService { LOG.info("MRAppMaster launching normal, non-uberized, multi-container " + "job " + job.getID() + "."); } + // Start ClientService here, since it's not initialized if + // errorHappenedShutDown is true + clientService.start(); } - //start all the components super.serviceStart(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java index a4c0a0d14bd..4727ffd8710 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/ClientService.java @@ -1,28 +1,30 @@ /** -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. 
The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, software -* distributed under the License is distributed on an "AS IS" BASIS, -* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -* See the License for the specific language governing permissions and -* limitations under the License. -*/ + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ package org.apache.hadoop.mapreduce.v2.app.client; import java.net.InetSocketAddress; -public interface ClientService { +import org.apache.hadoop.service.Service; - InetSocketAddress getBindAddress(); +public interface ClientService extends Service { - int getHttpPort(); + public abstract InetSocketAddress getBindAddress(); + + public abstract int getHttpPort(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index d36bf62fdf0..181fd3740a9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -94,8 +94,7 @@ import org.apache.hadoop.yarn.webapp.WebApps; * jobclient (user facing). 
* */ -public class MRClientService extends AbstractService - implements ClientService { +public class MRClientService extends AbstractService implements ClientService { static final Log LOG = LogFactory.getLog(MRClientService.class); @@ -106,7 +105,7 @@ public class MRClientService extends AbstractService private AppContext appContext; public MRClientService(AppContext appContext) { - super("MRClientService"); + super(MRClientService.class.getName()); this.appContext = appContext; this.protocolHandler = new MRClientProtocolHandler(); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index 6d1804f0627..76fd00ad848 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -55,6 +55,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; +import org.apache.hadoop.mapreduce.v2.app.client.MRClientService; import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEvent; import org.apache.hadoop.mapreduce.v2.app.commit.CommitterEventHandler; import org.apache.hadoop.mapreduce.v2.app.job.Job; @@ -603,7 +604,7 @@ public class MRApp extends MRAppMaster { @Override protected ClientService createClientService(AppContext context) { - return new ClientService(){ + return new MRClientService(context) { @Override public InetSocketAddress getBindAddress() { return NetUtils.createSocketAddr("localhost:9876"); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java new file mode 100644 index 00000000000..52ee2c7017d --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppComponentDependencies.java @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.mapreduce.v2.app; + +import java.io.IOException; + +import junit.framework.Assert; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; +import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; +import org.apache.hadoop.mapreduce.v2.api.records.JobState; +import org.apache.hadoop.mapreduce.v2.app.client.ClientService; +import org.apache.hadoop.mapreduce.v2.app.client.MRClientService; +import org.apache.hadoop.mapreduce.v2.app.job.Job; +import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; +import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent; +import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.exceptions.YarnRuntimeException; +import org.junit.Test; + +public class TestMRAppComponentDependencies { + + @Test(timeout = 20000) + public void testComponentStopOrder() throws Exception { + @SuppressWarnings("resource") + TestMRApp app = new TestMRApp(1, 1, true, this.getClass().getName(), true); + JobImpl job = (JobImpl) app.submit(new Configuration()); + app.waitForState(job, JobState.SUCCEEDED); + app.verifyCompleted(); + + int waitTime = 20 * 1000; + while (waitTime > 0 && app.numStops < 2) { + Thread.sleep(100); + waitTime -= 100; + } + + // assert JobHistoryEventHandlerStopped and then clientServiceStopped + Assert.assertEquals(1, app.JobHistoryEventHandlerStopped); + Assert.assertEquals(2, app.clientServiceStopped); + } + + private final class TestMRApp extends MRApp { + int JobHistoryEventHandlerStopped; + int clientServiceStopped; + int numStops; + + public TestMRApp(int maps, int reduces, boolean autoComplete, + String testName, boolean cleanOnStart) { + super(maps, reduces, autoComplete, testName, cleanOnStart); + JobHistoryEventHandlerStopped = 0; + clientServiceStopped = 0; + numStops = 0; + } + + @Override + protected Job createJob(Configuration conf, JobStateInternal forcedState, + String diagnostic) { + UserGroupInformation currentUser = null; + try { + currentUser = UserGroupInformation.getCurrentUser(); + } catch (IOException e) { + throw new YarnRuntimeException(e); + } + Job newJob = + new TestJob(getJobId(), getAttemptID(), conf, getDispatcher() + .getEventHandler(), getTaskAttemptListener(), getContext() + .getClock(), getCommitter(), isNewApiCommitter(), + currentUser.getUserName(), getContext(), forcedState, diagnostic); + ((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob); + + getDispatcher().register(JobFinishEvent.Type.class, + createJobFinishEventHandler()); + + return newJob; + } + + @Override + protected ClientService createClientService(AppContext context) { + return new MRClientService(context) { + @Override + public void serviceStop() throws Exception { + numStops++; + clientServiceStopped = numStops; + super.serviceStop(); + } + }; + } + + @Override + protected EventHandler createJobHistoryHandler( + AppContext context) { + return new JobHistoryEventHandler(context, getStartCount()) { + @Override + public void serviceStop() throws Exception { + numStops++; + JobHistoryEventHandlerStopped = numStops; + super.serviceStop(); + } + }; + } + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java index a0c0cb6c35f..496c1e35068 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java @@ -41,6 +41,7 @@ import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.JobState; import org.apache.hadoop.mapreduce.v2.app.client.ClientService; +import org.apache.hadoop.mapreduce.v2.app.client.MRClientService; import org.apache.hadoop.mapreduce.v2.app.job.Job; import org.apache.hadoop.mapreduce.v2.app.job.JobStateInternal; import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent; @@ -284,14 +285,12 @@ import org.junit.Test; private final class MRAppTestCleanup extends MRApp { int stagingDirCleanedup; int ContainerAllocatorStopped; - int JobHistoryEventHandlerStopped; int numStops; public MRAppTestCleanup(int maps, int reduces, boolean autoComplete, String testName, boolean cleanOnStart) { super(maps, reduces, autoComplete, testName, cleanOnStart); stagingDirCleanedup = 0; ContainerAllocatorStopped = 0; - JobHistoryEventHandlerStopped = 0; numStops = 0; } @@ -318,26 +317,6 @@ import org.junit.Test; return newJob; } - @Override - protected EventHandler createJobHistoryHandler( - AppContext context) { - return new TestJobHistoryEventHandler(context, getStartCount()); - } - - private class TestJobHistoryEventHandler extends JobHistoryEventHandler { - - public TestJobHistoryEventHandler(AppContext context, int startCount) { - super(context, startCount); - } - - @Override - public void serviceStop() throws Exception { - numStops++; - JobHistoryEventHandlerStopped = numStops; - super.serviceStop(); - } - } - @Override protected ContainerAllocator createContainerAllocator( ClientService clientService, AppContext context) { @@ -405,15 +384,13 @@ import org.junit.Test; app.verifyCompleted(); int waitTime = 20 * 1000; - while (waitTime > 0 && app.numStops < 3 ) { + while (waitTime > 0 && app.numStops < 2) { Thread.sleep(100); waitTime -= 100; } - // assert JobHistoryEventHandlerStopped first, then - // ContainerAllocatorStopped, and then stagingDirCleanedup - Assert.assertEquals(1, app.JobHistoryEventHandlerStopped); - Assert.assertEquals(2, app.ContainerAllocatorStopped); - Assert.assertEquals(3, app.stagingDirCleanedup); + // assert ContainerAllocatorStopped and then tagingDirCleanedup + Assert.assertEquals(1, app.ContainerAllocatorStopped); + Assert.assertEquals(2, app.stagingDirCleanedup); } } From e4374d803663c626de610cd5f062f25a6d7d5d4e Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 11 Sep 2013 19:57:37 +0000 Subject: [PATCH 149/153] HDFS-4680. Audit logging of delegation tokens for MR tracing. 
(Andrew Wang) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522012 13f79535-47bb-0310-9956-ffa450edef68 --- .../security/token/TokenIdentifier.java | 19 ++++++ .../AbstractDelegationTokenSecretManager.java | 38 +++++++++-- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 + .../org/apache/hadoop/hdfs/DFSConfigKeys.java | 2 + .../DelegationTokenSecretManager.java | 18 ++++- .../hdfs/server/namenode/FSNamesystem.java | 46 +++++++++++-- .../hdfs/server/namenode/HdfsAuditLogger.java | 66 +++++++++++++++++++ 7 files changed, 178 insertions(+), 13 deletions(-) create mode 100644 hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java index a33bcde7072..ebf9d58b37c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/TokenIdentifier.java @@ -21,6 +21,7 @@ package org.apache.hadoop.security.token; import java.io.IOException; import java.util.Arrays; +import org.apache.commons.codec.digest.DigestUtils; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.io.DataOutputBuffer; @@ -35,6 +36,9 @@ import org.apache.hadoop.security.UserGroupInformation; @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) @InterfaceStability.Evolving public abstract class TokenIdentifier implements Writable { + + private String trackingId = null; + /** * Get the token kind * @return the kind of the token @@ -62,4 +66,19 @@ public abstract class TokenIdentifier implements Writable { } return Arrays.copyOf(buf.getData(), buf.getLength()); } + + /** + * Returns a tracking identifier that can be used to associate usages of a + * token across multiple client sessions. + * + * Currently, this function just returns an MD5 of {{@link #getBytes()}. + * + * @return tracking identifier + */ + public String getTrackingId() { + if (trackingId == null) { + trackingId = DigestUtils.md5Hex(getBytes()); + } + return trackingId; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java index 42085acb178..a63e0628288 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java @@ -86,6 +86,11 @@ extends AbstractDelegationTokenIdentifier> private long tokenMaxLifetime; private long tokenRemoverScanInterval; private long tokenRenewInterval; + /** + * Whether to store a token's tracking ID in its TokenInformation. + * Can be overridden by a subclass. 
+ */ + protected boolean storeTokenTrackingId; private Thread tokenRemoverThread; protected volatile boolean running; @@ -102,6 +107,7 @@ extends AbstractDelegationTokenIdentifier> this.tokenMaxLifetime = delegationTokenMaxLifetime; this.tokenRenewInterval = delegationTokenRenewInterval; this.tokenRemoverScanInterval = delegationTokenRemoverScanInterval; + this.storeTokenTrackingId = false; } /** should be called before this object is used */ @@ -201,7 +207,7 @@ extends AbstractDelegationTokenIdentifier> } if (currentTokens.get(identifier) == null) { currentTokens.put(identifier, new DelegationTokenInformation(renewDate, - password)); + password, getTrackingIdIfEnabled(identifier))); } else { throw new IOException( "Same delegation token being added twice."); @@ -280,7 +286,7 @@ extends AbstractDelegationTokenIdentifier> byte[] password = createPassword(identifier.getBytes(), currentKey.getKey()); storeNewToken(identifier, now + tokenRenewInterval); currentTokens.put(identifier, new DelegationTokenInformation(now - + tokenRenewInterval, password)); + + tokenRenewInterval, password, getTrackingIdIfEnabled(identifier))); return password; } @@ -299,6 +305,21 @@ extends AbstractDelegationTokenIdentifier> return info.getPassword(); } + protected String getTrackingIdIfEnabled(TokenIdent ident) { + if (storeTokenTrackingId) { + return ident.getTrackingId(); + } + return null; + } + + public synchronized String getTokenTrackingId(TokenIdent identifier) { + DelegationTokenInformation info = currentTokens.get(identifier); + if (info == null) { + return null; + } + return info.getTrackingId(); + } + /** * Verifies that the given identifier and password are valid and match. * @param identifier Token identifier. @@ -359,8 +380,9 @@ extends AbstractDelegationTokenIdentifier> + " is trying to renew a token with " + "wrong password"); } long renewTime = Math.min(id.getMaxDate(), now + tokenRenewInterval); + String trackingId = getTrackingIdIfEnabled(id); DelegationTokenInformation info = new DelegationTokenInformation(renewTime, - password); + password, trackingId); if (currentTokens.get(id) == null) { throw new InvalidToken("Renewal request for unknown token"); @@ -420,9 +442,13 @@ extends AbstractDelegationTokenIdentifier> public static class DelegationTokenInformation { long renewDate; byte[] password; - public DelegationTokenInformation(long renewDate, byte[] password) { + String trackingId; + + public DelegationTokenInformation(long renewDate, byte[] password, + String trackingId) { this.renewDate = renewDate; this.password = password; + this.trackingId = trackingId; } /** returns renew date */ public long getRenewDate() { @@ -432,6 +458,10 @@ extends AbstractDelegationTokenIdentifier> byte[] getPassword() { return password; } + /** returns tracking id */ + public String getTrackingId() { + return trackingId; + } } /** Remove expired delegation tokens from cache */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ee6ff0f922a..ee45928cd21 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -272,6 +272,8 @@ Release 2.3.0 - UNRELEASED HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs (Todd Lipcon via Colin Patrick McCabe) + HDFS-4680. Audit logging of delegation tokens for MR tracing. 
(Andrew Wang) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java index b4d67ca19d2..a29121248c1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java @@ -267,6 +267,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys { public static final String DFS_CLIENT_LOCAL_INTERFACES = "dfs.client.local.interfaces"; public static final String DFS_NAMENODE_AUDIT_LOGGERS_KEY = "dfs.namenode.audit.loggers"; public static final String DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME = "default"; + public static final String DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY = "dfs.namenode.audit.log.token.tracking.id"; + public static final boolean DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT = false; // Much code in hdfs is not yet updated to use these keys. public static final String DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_RETRIES_KEY = "dfs.client.block.write.locateFollowingBlock.retries"; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java index 25fb25731f7..f233d1f5f72 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenSecretManager.java @@ -58,6 +58,15 @@ public class DelegationTokenSecretManager .getLog(DelegationTokenSecretManager.class); private final FSNamesystem namesystem; + + public DelegationTokenSecretManager(long delegationKeyUpdateInterval, + long delegationTokenMaxLifetime, long delegationTokenRenewInterval, + long delegationTokenRemoverScanInterval, FSNamesystem namesystem) { + this(delegationKeyUpdateInterval, delegationTokenMaxLifetime, + delegationTokenRenewInterval, delegationTokenRemoverScanInterval, false, + namesystem); + } + /** * Create a secret manager * @param delegationKeyUpdateInterval the number of seconds for rolling new @@ -67,13 +76,16 @@ public class DelegationTokenSecretManager * @param delegationTokenRenewInterval how often the tokens must be renewed * @param delegationTokenRemoverScanInterval how often the tokens are scanned * for expired tokens + * @param storeTokenTrackingId whether to store the token's tracking id */ public DelegationTokenSecretManager(long delegationKeyUpdateInterval, long delegationTokenMaxLifetime, long delegationTokenRenewInterval, - long delegationTokenRemoverScanInterval, FSNamesystem namesystem) { + long delegationTokenRemoverScanInterval, boolean storeTokenTrackingId, + FSNamesystem namesystem) { super(delegationKeyUpdateInterval, delegationTokenMaxLifetime, delegationTokenRenewInterval, delegationTokenRemoverScanInterval); this.namesystem = namesystem; + this.storeTokenTrackingId = storeTokenTrackingId; } @Override //SecretManager @@ -184,7 +196,7 @@ public class DelegationTokenSecretManager } if (currentTokens.get(identifier) == null) { currentTokens.put(identifier, new DelegationTokenInformation(expiryTime, - password)); + password, getTrackingIdIfEnabled(identifier))); } else { throw new IOException( "Same 
delegation token being added twice; invalid entry in fsimage or editlogs"); @@ -223,7 +235,7 @@ public class DelegationTokenSecretManager byte[] password = createPassword(identifier.getBytes(), allKeys .get(keyId).getKey()); currentTokens.put(identifier, new DelegationTokenInformation(expiryTime, - password)); + password, getTrackingIdIfEnabled(identifier))); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java index 884a14b8959..dc733122c68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java @@ -36,6 +36,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HA_STANDBY_CHECKPOINTS_KE import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOGGERS_KEY; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT; +import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DEFAULT_AUDIT_LOGGER_NAME; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT; import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY; @@ -220,6 +222,8 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.security.token.TokenIdentifier; +import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; import org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.util.Daemon; import org.apache.hadoop.util.DataChecksum; @@ -311,8 +315,14 @@ public class FSNamesystem implements Namesystem, FSClusterStats, stat.getGroup(), symlink, path); } for (AuditLogger logger : auditLoggers) { - logger.logAuditEvent(succeeded, ugi.toString(), addr, - cmd, src, dst, status); + if (logger instanceof HdfsAuditLogger) { + HdfsAuditLogger hdfsLogger = (HdfsAuditLogger) logger; + hdfsLogger.logAuditEvent(succeeded, ugi.toString(), addr, cmd, src, dst, + status, ugi, dtSecretManager); + } else { + logger.logAuditEvent(succeeded, ugi.toString(), addr, + cmd, src, dst, status); + } } } @@ -5906,7 +5916,10 @@ public class FSNamesystem implements Namesystem, FSClusterStats, DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT), conf.getLong(DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY, DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT), - DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL, this); + DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL, + conf.getBoolean(DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY, + DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT), + this); } /** @@ -6817,17 +6830,22 @@ public class FSNamesystem implements Namesystem, FSClusterStats, * defined in the config file. It can also be explicitly listed in the * config file. 
*/ - private static class DefaultAuditLogger implements AuditLogger { + private static class DefaultAuditLogger extends HdfsAuditLogger { + + private boolean logTokenTrackingId; @Override public void initialize(Configuration conf) { - // Nothing to do. + logTokenTrackingId = conf.getBoolean( + DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_KEY, + DFSConfigKeys.DFS_NAMENODE_AUDIT_LOG_TOKEN_TRACKING_ID_DEFAULT); } @Override public void logAuditEvent(boolean succeeded, String userName, InetAddress addr, String cmd, String src, String dst, - FileStatus status) { + FileStatus status, UserGroupInformation ugi, + DelegationTokenSecretManager dtSecretManager) { if (auditLog.isInfoEnabled()) { final StringBuilder sb = auditBuffer.get(); sb.setLength(0); @@ -6845,6 +6863,22 @@ public class FSNamesystem implements Namesystem, FSClusterStats, sb.append(status.getGroup()).append(":"); sb.append(status.getPermission()); } + if (logTokenTrackingId) { + sb.append("\t").append("trackingId="); + String trackingId = null; + if (ugi != null && dtSecretManager != null + && ugi.getAuthenticationMethod() == AuthenticationMethod.TOKEN) { + for (TokenIdentifier tid: ugi.getTokenIdentifiers()) { + if (tid instanceof DelegationTokenIdentifier) { + DelegationTokenIdentifier dtid = + (DelegationTokenIdentifier)tid; + trackingId = dtSecretManager.getTokenTrackingId(dtid); + break; + } + } + } + sb.append(trackingId); + } auditLog.info(sb); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java new file mode 100644 index 00000000000..1c2bc57e0ad --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/HdfsAuditLogger.java @@ -0,0 +1,66 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hdfs.server.namenode; + +import java.net.InetAddress; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; +import org.apache.hadoop.security.UserGroupInformation; + +/** + * Extension of {@link AuditLogger}. 
+ */ +@InterfaceAudience.Public +@InterfaceStability.Evolving +public abstract class HdfsAuditLogger implements AuditLogger { + + @Override + public void logAuditEvent(boolean succeeded, String userName, + InetAddress addr, String cmd, String src, String dst, + FileStatus status) { + logAuditEvent(succeeded, userName, addr, cmd, src, dst, status, null, + null); + } + + /** + * Same as + * {@link #logAuditEvent(boolean, String, InetAddress, String, String, String, FileStatus)} + * with additional parameters related to logging delegation token tracking + * IDs. + * + * @param succeeded Whether authorization succeeded. + * @param userName Name of the user executing the request. + * @param addr Remote address of the request. + * @param cmd The requested command. + * @param src Path of affected source file. + * @param dst Path of affected destination file (if any). + * @param stat File information for operations that change the file's metadata + * (permissions, owner, times, etc). + * @param ugi UserGroupInformation of the current user, or null if not logging + * token tracking information + * @param dtSecretManager The token secret manager, or null if not logging + * token tracking information + */ + public abstract void logAuditEvent(boolean succeeded, String userName, + InetAddress addr, String cmd, String src, String dst, + FileStatus stat, UserGroupInformation ugi, + DelegationTokenSecretManager dtSecretManager); +} From b6d9ef18c4969eb1afe6f174780dd727e459c183 Mon Sep 17 00:00:00 2001 From: Andrew Wang Date: Wed, 11 Sep 2013 21:17:30 +0000 Subject: [PATCH 150/153] Move HDFS-4680 in CHANGES.txt git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522049 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index ee45928cd21..0627b0ea56e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -272,8 +272,6 @@ Release 2.3.0 - UNRELEASED HDFS-4879. Add "blocked ArrayList" collection to avoid CMS full GCs (Todd Lipcon via Colin Patrick McCabe) - HDFS-4680. Audit logging of delegation tokens for MR tracing. (Andrew Wang) - OPTIMIZATIONS BUG FIXES @@ -369,6 +367,8 @@ Release 2.1.1-beta - UNRELEASED HDFS-5150. Allow per NN SPN for internal SPNEGO. (kihwal) + HDFS-4680. Audit logging of delegation tokens for MR tracing. (Andrew Wang) + OPTIMIZATIONS BUG FIXES From c22091aecb802128b37c51501e40023532e3c62f Mon Sep 17 00:00:00 2001 From: Thomas Graves Date: Wed, 11 Sep 2013 22:00:08 +0000 Subject: [PATCH 151/153] YARN-1176. RM web services ClusterMetricsInfo total nodes doesn't include unhealthy nodes (Jonathan Eagles via tgraves) git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522062 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 6 ++++++ .../webapp/dao/ClusterMetricsInfo.java | 2 +- .../resourcemanager/webapp/TestRMWebServices.java | 14 +++++++++++++- 3 files changed, 20 insertions(+), 2 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 270dd247e6a..779f9fcfed0 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -185,6 +185,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1025. ResourceManager and NodeManager do not load native libraries on Windows. (cnauroth) + YARN-1176. 
RM web services ClusterMetricsInfo total nodes doesn't include + unhealthy nodes (Jonathan Eagles via tgraves) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES @@ -1309,6 +1312,9 @@ Release 0.23.10 - UNRELEASED YARN-1101. Active nodes can be decremented below 0 (Robert Parker via tgraves) + YARN-1176. RM web services ClusterMetricsInfo total nodes doesn't include + unhealthy nodes (Jonathan Eagles via tgraves) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java index 8266661954b..92c9678371b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java @@ -84,7 +84,7 @@ public class ClusterMetricsInfo { this.decommissionedNodes = clusterMetrics.getNumDecommisionedNMs(); this.rebootedNodes = clusterMetrics.getNumRebootedNMs(); this.totalNodes = activeNodes + lostNodes + decommissionedNodes - + rebootedNodes; + + rebootedNodes + unhealthyNodes; } public int getAppsSubmitted() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java index 4f6cbbec320..89bec21cc24 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java @@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.webapp.WebServicesTestUtils; import org.codehaus.jettison.json.JSONException; import org.codehaus.jettison.json.JSONObject; import org.junit.Before; +import org.junit.BeforeClass; import org.junit.Test; import org.w3c.dom.Document; import org.w3c.dom.Element; @@ -109,6 +110,16 @@ public class TestRMWebServices extends JerseyTest { .contextPath("jersey-guice-filter").servletPath("/").build()); } + @BeforeClass + public static void initClusterMetrics() { + ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics(); + clusterMetrics.incrDecommisionedNMs(); + clusterMetrics.incrNumActiveNodes(); + clusterMetrics.incrNumLostNMs(); + clusterMetrics.incrNumRebootedNMs(); + clusterMetrics.incrNumUnhealthyNMs(); + } + @Test public void testInfoXML() throws JSONException, Exception { WebResource r = resource(); @@ -426,7 +437,8 @@ public class TestRMWebServices extends JerseyTest { "totalNodes doesn't match", clusterMetrics.getNumActiveNMs() + clusterMetrics.getNumLostNMs() + clusterMetrics.getNumDecommisionedNMs() - + clusterMetrics.getNumRebootedNMs(), totalNodes); + + clusterMetrics.getNumRebootedNMs() + + 
clusterMetrics.getUnhealthyNMs(), totalNodes); assertEquals("lostNodes doesn't match", clusterMetrics.getNumLostNMs(), lostNodes); assertEquals("unhealthyNodes doesn't match", From a202855af59d2eeb1c4a119bee6d4eb56565a9e9 Mon Sep 17 00:00:00 2001 From: Devarajulu K Date: Thu, 12 Sep 2013 14:32:20 +0000 Subject: [PATCH 152/153] MAPREDUCE-5164. mapred job and queue commands omit HADOOP_CLIENT_OPTS. Contributed by Nemon Lou. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522595 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-mapreduce-project/CHANGES.txt | 3 +++ hadoop-mapreduce-project/bin/mapred | 2 ++ hadoop-mapreduce-project/bin/mapred.cmd | 2 ++ 3 files changed, 7 insertions(+) diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 3dc14de172b..093273f0cb9 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -262,6 +262,9 @@ Release 2.1.1-beta - UNRELEASED MAPREDUCE-5020. Compile failure with JDK8 (Trevor Robinson via tgraves) + MAPREDUCE-5164. mapred job and queue commands omit HADOOP_CLIENT_OPTS + (Nemon Lou via devaraj) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred index 5509418c817..b95c7cd4f94 100755 --- a/hadoop-mapreduce-project/bin/mapred +++ b/hadoop-mapreduce-project/bin/mapred @@ -61,8 +61,10 @@ esac if [ "$COMMAND" = "job" ] ; then CLASS=org.apache.hadoop.mapred.JobClient + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "queue" ] ; then CLASS=org.apache.hadoop.mapred.JobQueueClient + HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" elif [ "$COMMAND" = "pipes" ] ; then CLASS=org.apache.hadoop.mapred.pipes.Submitter HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS" diff --git a/hadoop-mapreduce-project/bin/mapred.cmd b/hadoop-mapreduce-project/bin/mapred.cmd index d26b930e378..b2d53fa145e 100644 --- a/hadoop-mapreduce-project/bin/mapred.cmd +++ b/hadoop-mapreduce-project/bin/mapred.cmd @@ -103,10 +103,12 @@ goto :eof :job set CLASS=org.apache.hadoop.mapred.JobClient + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :queue set CLASS=org.apache.hadoop.mapred.JobQueueClient + set HADOOP_OPTS=%HADOOP_OPTS% %HADOOP_CLIENT_OPTS% goto :eof :sampler From f152a7e788013249b4782edf08b4b47dbef71036 Mon Sep 17 00:00:00 2001 From: Chris Nauroth Date: Thu, 12 Sep 2013 15:58:34 +0000 Subject: [PATCH 153/153] YARN-1078. TestNodeManagerResync, TestNodeManagerShutdown, and TestNodeStatusUpdater fail on Windows. Contributed by Chuan Liu. git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1522644 13f79535-47bb-0310-9956-ffa450edef68 --- hadoop-yarn-project/CHANGES.txt | 3 ++ .../nodemanager/TestNodeManagerShutdown.java | 4 ++- .../nodemanager/TestNodeStatusUpdater.java | 32 ++++++++++++------- 3 files changed, 26 insertions(+), 13 deletions(-) diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 779f9fcfed0..266faa54754 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -188,6 +188,9 @@ Release 2.1.1-beta - UNRELEASED YARN-1176. RM web services ClusterMetricsInfo total nodes doesn't include unhealthy nodes (Jonathan Eagles via tgraves) + YARN-1078. TestNodeManagerResync, TestNodeManagerShutdown, and + TestNodeStatusUpdater fail on Windows. 
(Chuan Liu via cnauroth) + Release 2.1.0-beta - 2013-08-22 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java index 6fcb1e0710a..4c9559d660a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java @@ -23,6 +23,7 @@ import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.security.PrivilegedAction; import java.util.ArrayList; @@ -163,7 +164,8 @@ public class TestNodeManagerShutdown { ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); - NodeId nodeId = BuilderUtils.newNodeId("localhost", 12345); + NodeId nodeId = BuilderUtils.newNodeId(InetAddress.getByName("localhost") + .getCanonicalHostName(), 12345); URL localResourceUri = ConverterUtils.getYarnUrlFromPath(localFS diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 3fc5a2dddfb..4f7cd30ed66 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -23,7 +23,9 @@ import static org.mockito.Mockito.when; import java.io.File; import java.io.IOException; +import java.net.InetAddress; import java.net.InetSocketAddress; +import java.net.UnknownHostException; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.Collections; @@ -219,11 +221,11 @@ public class TestNodeStatusUpdater { Resource resource = BuilderUtils.newResource(2, 1); long currentTime = System.currentTimeMillis(); String user = "testUser"; - ContainerTokenIdentifier containerToken = - BuilderUtils.newContainerTokenIdentifier(BuilderUtils - .newContainerToken(firstContainerID, "localhost", 1234, user, - resource, currentTime + 10000, 123, "password".getBytes(), - currentTime)); + ContainerTokenIdentifier containerToken = BuilderUtils + .newContainerTokenIdentifier(BuilderUtils.newContainerToken( + firstContainerID, InetAddress.getByName("localhost") + .getCanonicalHostName(), 1234, user, resource, + currentTime + 10000, 123, "password".getBytes(), currentTime)); Container container = new ContainerImpl(conf, mockDispatcher, launchContext, null, mockMetrics, containerToken); @@ -250,11 +252,11 @@ public class TestNodeStatusUpdater { long currentTime = System.currentTimeMillis(); String user = "testUser"; Resource resource = BuilderUtils.newResource(3, 1); - ContainerTokenIdentifier containerToken = 
- BuilderUtils.newContainerTokenIdentifier(BuilderUtils - .newContainerToken(secondContainerID, "localhost", 1234, user, - resource, currentTime + 10000, 123, - "password".getBytes(), currentTime)); + ContainerTokenIdentifier containerToken = BuilderUtils + .newContainerTokenIdentifier(BuilderUtils.newContainerToken( + secondContainerID, InetAddress.getByName("localhost") + .getCanonicalHostName(), 1234, user, resource, + currentTime + 10000, 123, "password".getBytes(), currentTime)); Container container = new ContainerImpl(conf, mockDispatcher, launchContext, null, mockMetrics, containerToken); @@ -1290,9 +1292,15 @@ public class TestNodeStatusUpdater { private YarnConfiguration createNMConfig() { YarnConfiguration conf = new YarnConfiguration(); + String localhostAddress = null; + try { + localhostAddress = InetAddress.getByName("localhost").getCanonicalHostName(); + } catch (UnknownHostException e) { + Assert.fail("Unable to get localhost address: " + e.getMessage()); + } conf.setInt(YarnConfiguration.NM_PMEM_MB, 5 * 1024); // 5GB - conf.set(YarnConfiguration.NM_ADDRESS, "localhost:12345"); - conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, "localhost:12346"); + conf.set(YarnConfiguration.NM_ADDRESS, localhostAddress + ":12345"); + conf.set(YarnConfiguration.NM_LOCALIZER_ADDRESS, localhostAddress + ":12346"); conf.set(YarnConfiguration.NM_LOG_DIRS, logsDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogsDir.getAbsolutePath());