diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index e196b74927a..ce666246d4a 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -333,6 +333,9 @@ Release 2.3.0 - UNRELEASED HADOOP-9915. o.a.h.fs.Stat support on Mac OS X (Binglin Chang via Colin Patrick McCabe) + HADOOP-9998. Provide methods to clear only part of the DNSToSwitchMapping. + (Junping Du via Colin Patrick McCabe) + OPTIMIZATIONS HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn) @@ -392,6 +395,12 @@ Release 2.1.2 - UNRELEASED IMPROVEMENTS + HADOOP-9948. Add a config value to CLITestHelper to skip tests on Windows. + (Chuan Liu via cnauroth) + + HADOOP-9976. Different versions of avro and avro-maven-plugin (Karthik + Kambatla via Sandy Ryza) + OPTIMIZATIONS BUG FIXES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java index e152c549ffc..af487ed5c61 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/CachedDNSToSwitchMapping.java @@ -154,4 +154,11 @@ public class CachedDNSToSwitchMapping extends AbstractDNSToSwitchMapping { public void reloadCachedMappings() { cache.clear(); } + + @Override + public void reloadCachedMappings(List names) { + for (String name : names) { + cache.remove(name); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java index ccc109302fd..7b1b332b9b7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNSToSwitchMapping.java @@ -59,4 +59,12 @@ public interface DNSToSwitchMapping { * will get a chance to see the new data. */ public void reloadCachedMappings(); + + /** + * Reload cached mappings on specific nodes. + * + * If there is a cache on these nodes, this method will clear it, so that + * future accesses will see updated data. 
+ */ + public void reloadCachedMappings(List names); } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java index c9e62e8580c..2d02e133618 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/ScriptBasedMapping.java @@ -269,5 +269,11 @@ public final class ScriptBasedMapping extends CachedDNSToSwitchMapping { // Nothing to do here, since RawScriptBasedMapping has no cache, and // does not inherit from CachedDNSToSwitchMapping } + + @Override + public void reloadCachedMappings(List names) { + // Nothing to do here, since RawScriptBasedMapping has no cache, and + // does not inherit from CachedDNSToSwitchMapping + } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java index fa96476ff6e..2662108124d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java @@ -162,5 +162,12 @@ public class TableMapping extends CachedDNSToSwitchMapping { } } } + + @Override + public void reloadCachedMappings(List names) { + // TableMapping has to reload all mappings at once, so no chance to + // reload mappings on specific nodes + reloadCachedMappings(); + } } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java index 3fba5d98a57..d39727bc602 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/cli/CLITestHelper.java @@ -21,11 +21,10 @@ package org.apache.hadoop.cli; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.cli.util.*; -import org.apache.hadoop.cli.util.CLITestCmd; -import org.apache.hadoop.cli.util.CLICommand; import org.apache.hadoop.cli.util.CommandExecutor.Result; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.util.Shell; import org.apache.hadoop.util.StringUtils; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; @@ -369,6 +368,7 @@ public class CLITestHelper { CLITestData td = null; ArrayList testCommands = null; ArrayList cleanupCommands = null; + boolean runOnWindows = true; @Override public void startDocument() throws SAXException { @@ -399,6 +399,8 @@ public class CLITestHelper { throws SAXException { if (qName.equals("description")) { td.setTestDesc(charString); + } else if (qName.equals("windows")) { + runOnWindows = Boolean.parseBoolean(charString); } else if (qName.equals("test-commands")) { td.setTestCommands(testCommands); testCommands = null; @@ -420,8 +422,11 @@ public class CLITestHelper { } else if (qName.equals("expected-output")) { comparatorData.setExpectedOutput(charString); } else if (qName.equals("test")) { - testsFromConfigFile.add(td); + if (!Shell.WINDOWS || runOnWindows) { + testsFromConfigFile.add(td); + } td = null; + runOnWindows = true; } else if (qName.equals("mode")) { 
testMode = charString; if (!testMode.equals(TESTMODE_NOCOMPARE) && diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java index 5492b47c8b7..493d86000ee 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/StaticMapping.java @@ -152,4 +152,10 @@ public class StaticMapping extends AbstractDNSToSwitchMapping { // reloadCachedMappings does nothing for StaticMapping; there is // nowhere to reload from since all data is in memory. } + + @Override + public void reloadCachedMappings(List names) { + // reloadCachedMappings does nothing for StaticMapping; there is + // nowhere to reload from since all data is in memory. + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java index 1caa454243f..b5de661caca 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/net/TestSwitchMapping.java @@ -120,5 +120,9 @@ public class TestSwitchMapping extends Assert { @Override public void reloadCachedMappings() { } + + @Override + public void reloadCachedMappings(List names) { + } } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4db26da535f..9137e187805 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -120,6 +120,9 @@ Trunk (Unreleased) HDFS-4904. Remove JournalService. (Arpit Agarwal via cnauroth) + HDFS-5041. Add the time of last heartbeat to dead server Web UI (Shinichi + Yamashita via brandonli) + OPTIMIZATIONS BUG FIXES @@ -345,6 +348,12 @@ Release 2.1.2 - UNRELEASED HDFS-5251. Race between the initialization of NameNode and the http server. (Haohui Mai via suresh) + HDFS-5258. Skip tests in TestHDFSCLI that are not applicable on Windows. + (Chuan Liu via cnauroth) + + HDFS-5186. TestFileJournalManager fails on Windows due to file handle leaks. + (Chuan Liu via cnauroth) + Release 2.1.1-beta - 2013-09-23 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java index c468dcb7188..03b5681f763 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java @@ -886,7 +886,12 @@ public class DatanodeManager { // If the network location is invalid, clear the cached mappings // so that we have a chance to re-add this DataNode with the // correct network location later. 
- dnsToSwitchMapping.reloadCachedMappings(); + List invalidNodeNames = new ArrayList(3); + // clear cache for nodes in IP or Hostname + invalidNodeNames.add(nodeReg.getIpAddr()); + invalidNodeNames.add(nodeReg.getHostName()); + invalidNodeNames.add(nodeReg.getPeerHostName()); + dnsToSwitchMapping.reloadCachedMappings(invalidNodeNames); throw e; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java index 5ad877979a5..e6566a85862 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java @@ -30,6 +30,7 @@ import java.net.URLEncoder; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; +import java.util.Date; import java.util.Iterator; import java.util.List; import java.util.Map; @@ -786,9 +787,13 @@ class NamenodeJspHelper { */ generateNodeDataHeader(out, d, suffix, alive, nnHttpPort, nnaddr); + long currentTime = Time.now(); + long timestamp = d.getLastUpdate(); if (!alive) { - out.print(" " + - d.isDecommissioned() + "\n"); + out.print(" " + + new Date(timestamp) + + " " + + d.isDecommissioned() + "\n"); return; } @@ -801,9 +806,6 @@ class NamenodeJspHelper { String percentRemaining = fraction2String(d.getRemainingPercent()); String adminState = d.getAdminState().toString(); - - long timestamp = d.getLastUpdate(); - long currentTime = Time.now(); long bpUsed = d.getBlockPoolUsed(); String percentBpUsed = fraction2String(d.getBlockPoolUsedPercent()); @@ -954,6 +956,8 @@ class NamenodeJspHelper { + " Node Transferring
Address Last
Contact Decommissioned\n"); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java index 44d1058806f..342a167cac3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFileJournalManager.java @@ -86,24 +86,27 @@ public class TestFileJournalManager { EditLogInputStream elis = null; try { while ((elis = allStreams.poll()) != null) { - elis.skipUntil(txId); - while (true) { - FSEditLogOp op = elis.readOp(); - if (op == null) { - break; + try { + elis.skipUntil(txId); + while (true) { + FSEditLogOp op = elis.readOp(); + if (op == null) { + break; + } + if (abortOnGap && (op.getTransactionId() != txId)) { + LOG.info("getNumberOfTransactions: detected gap at txId " + + fromTxId); + return numTransactions; + } + txId = op.getTransactionId() + 1; + numTransactions++; } - if (abortOnGap && (op.getTransactionId() != txId)) { - LOG.info("getNumberOfTransactions: detected gap at txId " + - fromTxId); - return numTransactions; - } - txId = op.getTransactionId() + 1; - numTransactions++; + } finally { + IOUtils.cleanup(LOG, elis); } } } finally { IOUtils.cleanup(LOG, allStreams.toArray(new EditLogInputStream[0])); - IOUtils.cleanup(LOG, elis); } return numTransactions; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml index 16a44ff4cf8..00730dab413 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml @@ -1043,6 +1043,7 @@ ls: Negative test for quoted /*/* globbing + false -fs NAMENODE -mkdir /dir0 -fs NAMENODE -mkdir /dir0/dir1 @@ -1062,6 +1063,7 @@ ls: Test for quoted globbing + false -fs NAMENODE -mkdir /dir0 -fs NAMENODE -mkdir /dir0/\* @@ -1082,6 +1084,7 @@ rm: Test for quoted globbing + false -fs NAMENODE -mkdir /dir0 -fs NAMENODE -mkdir /dir0/\* diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 24337fcd6be..82463d3db9e 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -221,6 +221,15 @@ Release 2.1.2 - UNRELEASED MAPREDUCE-5503. Fixed a test issue in TestMRJobClient. (Jian He via vinodkv) + MAPREDUCE-5170. Fixed a wrong log message in CombineFileInputFormat class. + (Sangjin Lee via vinodkv) + + MAPREDUCE-5525. Increase timeout of TestDFSIO.testAppend and + TestMRJobsWithHistoryService.testJobHistoryData. (Chuan Liu via cnauroth) + + MAPREDUCE-5513. ConcurrentModificationException in JobControl (Robert + Parker via jlowe) + Release 2.1.1-beta - 2013-09-23 INCOMPATIBLE CHANGES @@ -1403,6 +1412,9 @@ Release 0.23.10 - UNRELEASED MAPREDUCE-5504. mapred queue -info inconsistent with types (Kousuke Saruta via tgraves) + MAPREDUCE-5513. 
ConcurrentModificationException in JobControl (Robert + Parker via jlowe) + Release 0.23.9 - 2013-07-08 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java index 5556eee7c8e..4cf5c36f71b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java @@ -203,13 +203,13 @@ public abstract class CombineFileInputFormat maxSize); } if (minSizeRack != 0 && maxSize != 0 && minSizeRack > maxSize) { - throw new IOException("Minimum split size per rack" + minSizeRack + + throw new IOException("Minimum split size per rack " + minSizeRack + " cannot be larger than maximum split size " + maxSize); } if (minSizeRack != 0 && minSizeNode > minSizeRack) { - throw new IOException("Minimum split size per node" + minSizeNode + - " cannot be smaller than minimum split " + + throw new IOException("Minimum split size per node " + minSizeNode + + " cannot be larger than minimum split " + "size per rack " + minSizeRack); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java index 3c3e362ae26..ca4857ecb75 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/jobcontrol/JobControl.java @@ -79,13 +79,11 @@ public class JobControl implements Runnable { this.runnerState = ThreadState.READY; } - private static List toList( + synchronized private static List toList( LinkedList jobs) { ArrayList retv = new ArrayList(); - synchronized (jobs) { - for (ControlledJob job : jobs) { - retv.add(job); - } + for (ControlledJob job : jobs) { + retv.add(job); } return retv; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java index 24f67925ed0..86d5016a67b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java @@ -98,6 +98,10 @@ public class TestJobHistoryParsing { @Override public void reloadCachedMappings() { } + + @Override + public void reloadCachedMappings(List names) { + } } @Test(timeout = 50000) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java index 6bf44d29517..b354b204b8e 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java @@ -269,7 +269,7 @@ public class TestDFSIO implements Tool { bench.analyzeResult(fs, TestType.TEST_TYPE_READ_SKIP, execTime); } - @Test (timeout = 3000) + @Test (timeout = 6000) public void testAppend() throws Exception { FileSystem fs = cluster.getFileSystem(); long tStart = System.currentTimeMillis(); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java index 096a6f44c98..a52b34d62cd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java @@ -111,7 +111,7 @@ public class TestMRJobsWithHistoryService { } } - @Test (timeout = 30000) + @Test (timeout = 90000) public void testJobHistoryData() throws IOException, InterruptedException, AvroRemoteException, ClassNotFoundException { if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) { diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 01b220381f9..126220359e7 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -59,6 +59,9 @@ ${basedir}/../../hadoop-common-project/hadoop-common/target file:///dev/urandom + + 1.7.4 + 1.9 @@ -630,7 +633,7 @@ org.apache.avro avro - 1.7.4 + ${avro.version} net.sf.kosmosfs @@ -798,7 +801,7 @@ org.apache.avro avro-maven-plugin - 1.5.3 + ${avro.version} org.codehaus.mojo.jspc diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index e075c18a7eb..e9cef587164 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -36,6 +36,9 @@ Release 2.3.0 - UNRELEASED YARN-353. Add Zookeeper-based store implementation for RMStateStore. (Bikas Saha, Jian He and Karthik Kambatla via hitesh) + YARN-819. ResourceManager and NodeManager should check for a minimum allowed + version (Robert Parker via jeagles) + OPTIMIZATIONS BUG FIXES @@ -83,6 +86,9 @@ Release 2.1.2 - UNRELEASED YARN-49. Improve distributed shell application to work on a secure cluster. (Vinod Kumar Vavilapalli via hitesh) + YARN-1157. Fixed ResourceManager UI to behave correctly when apps like + distributed-shell do not set tracking urls. 
(Xuan Gong via vinodkv) + Release 2.1.1-beta - 2013-09-23 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java index 3c72568b1b4..15c36802bbe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/FinishApplicationMasterRequest.java @@ -100,11 +100,22 @@ public abstract class FinishApplicationMasterRequest { public abstract String getTrackingUrl(); /** - * Set the tracking URLfor the ApplicationMaster - * This url if contains scheme then that will be used by resource manager - * web application proxy otherwise it will default to http. - * @param url tracking URLfor the - * ApplicationMaster + * Set the final tracking URLfor the ApplicationMaster. + * This is the web-URL to which ResourceManager or web-application proxy will + * redirect client/users once the application is finished and the + * ApplicationMaster is gone. + *

+ * If the passed URL has a scheme, then that scheme will be used by the + ResourceManager and web-application proxy; otherwise the scheme will + default to http. + *

+ *

+ * Empty, null, and "N/A" strings are all valid in addition to a real URL. If a URL + isn't explicitly passed, it defaults to "N/A" on the ResourceManager. + *

+ * + * @param url + * tracking URL for the ApplicationMaster */ @Public @Stable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java index 05668dd16bf..0b485d18fd1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/RegisterApplicationMasterRequest.java @@ -112,11 +112,22 @@ public abstract class RegisterApplicationMasterRequest { public abstract String getTrackingUrl(); /** - * Set the tracking URL for the ApplicationMaster. - * This url if contains scheme then that will be used by resource manager - * web application proxy otherwise it will default to http. - * @param trackingUrl tracking URL for the - * ApplicationMaster + * Set the tracking URL for the ApplicationMaster while + * it is running. This is the web-URL to which ResourceManager or + * web-application proxy will redirect client/users while the application and + * the ApplicationMaster are still running. + *

+ * If the passed URL has a scheme, then that scheme will be used by the + ResourceManager and web-application proxy; otherwise the scheme will + default to http. + *

+ *

+ * Empty, null, and "N/A" strings are all valid in addition to a real URL. If a URL + isn't explicitly passed, it defaults to "N/A" on the ResourceManager. + *

+ * + * @param trackingUrl + * tracking URLfor the ApplicationMaster */ @Public @Stable diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 6bf8428e292..dc5baa1a166 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -362,6 +362,13 @@ public class YarnConfiguration extends Configuration { public static final long DEFAULT_RM_NMTOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS = 24 * 60 * 60; + + public static final String RM_NODEMANAGER_MINIMUM_VERSION = + RM_PREFIX + "nodemanager.minimum.version"; + + public static final String DEFAULT_RM_NODEMANAGER_MINIMUM_VERSION = + "NONE"; + //////////////////////////////// // Node Manager Configs //////////////////////////////// @@ -460,6 +467,10 @@ public class YarnConfiguration extends Configuration { public static final String NM_LOG_DIRS = NM_PREFIX + "log-dirs"; public static final String DEFAULT_NM_LOG_DIRS = "/tmp/logs"; + public static final String NM_RESOURCEMANAGER_MINIMUM_VERSION = + NM_PREFIX + "resourcemanager.minimum.version"; + public static final String DEFAULT_NM_RESOURCEMANAGER_MINIMUM_VERSION = "NONE"; + /** Interval at which the delayed token removal thread runs */ public static final String RM_DELAYED_DELEGATION_TOKEN_REMOVAL_INTERVAL_MS = RM_PREFIX + "delayed.delegation-token.removal-interval-ms"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java index 71ee9f9f5a7..cb8c86aaea3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClientContainerRequest.java @@ -215,6 +215,10 @@ public class TestAMRMClientContainerRequest { @Override public void reloadCachedMappings() {} + + @Override + public void reloadCachedMappings(List names) { + } } private void verifyResourceRequest( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 65718a9d69c..a77115824d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -358,6 +358,14 @@ 1000 + + The minimum allowed version of a connecting nodemanager. The valid values are + NONE (no version checking), EqualToRM (the nodemanager's version is equal to + or greater than the RM version), or a Version String. + yarn.resourcemanager.nodemanager.minimum.version + NONE + + Enable a set of periodic monitors (specified in yarn.resourcemanager.scheduler.monitor.policies) that affect the @@ -737,6 +745,14 @@ 30 + + The minimum allowed version of a resourcemanager that a nodemanager will connect to. 
+ The valid values are NONE (no version checking), EqualToNM (the resourcemanager's version is + equal to or greater than the NM version), or a Version String. + yarn.nodemanager.resourcemanager.minimum.version + NONE + + Max number of threads in NMClientAsync to process container management events diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java index d3ccfcaa92a..42f7b9b5091 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestRackResolver.java @@ -67,6 +67,10 @@ public class TestRackResolver { public void reloadCachedMappings() { // nothing to do here, since RawScriptBasedMapping has no cache. } + + @Override + public void reloadCachedMappings(List names) { + } } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java index ac9ee771b0d..32f44a475ed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerRequest.java @@ -25,8 +25,10 @@ public interface RegisterNodeManagerRequest { NodeId getNodeId(); int getHttpPort(); Resource getResource(); + String getNMVersion(); void setNodeId(NodeId nodeId); void setHttpPort(int port); void setResource(Resource resource); + void setNMVersion(String version); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerResponse.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerResponse.java index 8e226299518..b20803fb9cc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerResponse.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/RegisterNodeManagerResponse.java @@ -42,4 +42,7 @@ public interface RegisterNodeManagerResponse { void setDiagnosticsMessage(String diagnosticsMessage); + void setRMVersion(String version); + + String getRMVersion(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java index e0a09975522..b81a5900841 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/protocolrecords/impl/pb/RegisterNodeManagerRequestPBImpl.java @@ -139,6 +139,21 @@ public class RegisterNodeManagerRequestPBImpl extends ProtoBase containers = new ConcurrentSkipListMap(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index ec2d4350427..d29115797e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -28,6 +28,7 @@ import org.apache.hadoop.ipc.Server; import org.apache.hadoop.net.Node; import org.apache.hadoop.security.authorize.PolicyProvider; import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.util.VersionUtil; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; @@ -55,6 +56,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManag import org.apache.hadoop.yarn.server.resourcemanager.security.authorize.RMPolicyProvider; import org.apache.hadoop.yarn.server.utils.YarnServerBuilderUtils; import org.apache.hadoop.yarn.util.RackResolver; +import org.apache.hadoop.yarn.util.YarnVersionInfo; public class ResourceTrackerService extends AbstractService implements ResourceTracker { @@ -73,6 +75,7 @@ public class ResourceTrackerService extends AbstractService implements private long nextHeartBeatInterval; private Server server; private InetSocketAddress resourceTrackerAddress; + private String minimumNodeManagerVersion; private static final NodeHeartbeatResponse resync = recordFactory .newRecordInstance(NodeHeartbeatResponse.class); @@ -99,6 +102,7 @@ public class ResourceTrackerService extends AbstractService implements this.nmLivelinessMonitor = nmLivelinessMonitor; this.containerTokenSecretManager = containerTokenSecretManager; this.nmTokenSecretManager = nmTokenSecretManager; + } @Override @@ -124,7 +128,11 @@ public class ResourceTrackerService extends AbstractService implements minAllocVcores = conf.getInt( YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES, YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES); - + + minimumNodeManagerVersion = conf.get( + YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION, + YarnConfiguration.DEFAULT_RM_NODEMANAGER_MINIMUM_VERSION); + super.serviceInit(conf); } @@ -172,10 +180,30 @@ public class ResourceTrackerService extends AbstractService implements int cmPort = nodeId.getPort(); int httpPort = request.getHttpPort(); Resource capability = request.getResource(); + String nodeManagerVersion = request.getNMVersion(); RegisterNodeManagerResponse response = recordFactory .newRecordInstance(RegisterNodeManagerResponse.class); + if 
(!minimumNodeManagerVersion.equals("NONE")) { + if (minimumNodeManagerVersion.equals("EqualToRM")) { + minimumNodeManagerVersion = YarnVersionInfo.getVersion(); + } + + if ((nodeManagerVersion == null) || + (VersionUtil.compareVersions(nodeManagerVersion,minimumNodeManagerVersion)) < 0) { + String message = + "Disallowed NodeManager Version " + nodeManagerVersion + + ", is less than the minimum version " + + minimumNodeManagerVersion + " sending SHUTDOWN signal to " + + "NodeManager."; + LOG.info(message); + response.setDiagnosticsMessage(message); + response.setNodeAction(NodeAction.SHUTDOWN); + return response; + } + } + // Check if this node is a 'valid' node if (!this.nodesListManager.isValidNode(host)) { String message = @@ -230,6 +258,7 @@ public class ResourceTrackerService extends AbstractService implements LOG.info(message); response.setNodeAction(NodeAction.NORMAL); response.setRMIdentifier(ResourceManager.getClusterTimeStamp()); + response.setRMVersion(YarnVersionInfo.getVersion()); return response; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 530b343afe2..ae3bc9f7698 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -994,7 +994,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { } } - static final class AMRegisteredTransition extends BaseTransition { + private static final class AMRegisteredTransition extends BaseTransition { @Override public void transition(RMAppAttemptImpl appAttempt, RMAppAttemptEvent event) { @@ -1003,7 +1003,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { = (RMAppAttemptRegistrationEvent) event; appAttempt.host = registrationEvent.getHost(); appAttempt.rpcPort = registrationEvent.getRpcport(); - appAttempt.origTrackingUrl = registrationEvent.getTrackingurl(); + appAttempt.origTrackingUrl = + sanitizeTrackingUrl(registrationEvent.getTrackingurl()); appAttempt.proxiedTrackingUrl = appAttempt.generateProxyUriWithoutScheme(appAttempt.origTrackingUrl); @@ -1138,7 +1139,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { RMAppAttemptUnregistrationEvent unregisterEvent = (RMAppAttemptUnregistrationEvent) event; appAttempt.diagnostics.append(unregisterEvent.getDiagnostics()); - appAttempt.origTrackingUrl = unregisterEvent.getTrackingUrl(); + appAttempt.origTrackingUrl = + sanitizeTrackingUrl(unregisterEvent.getTrackingUrl()); appAttempt.proxiedTrackingUrl = appAttempt.generateProxyUriWithoutScheme(appAttempt.origTrackingUrl); appAttempt.finalStatus = unregisterEvent.getFinalApplicationStatus(); @@ -1292,4 +1294,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.rmContext.getAMRMTokenSecretManager() .applicationMasterFinished(appAttempt.getAppAttemptId()); } + + private static String sanitizeTrackingUrl(String url) { + return (url == null || url.trim().isEmpty()) ? 
"N/A" : url; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java index f943101e1cc..7f4d3f01055 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java @@ -58,6 +58,7 @@ import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus; import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode; import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.apache.hadoop.yarn.util.resource.Resources; @Private @@ -96,9 +97,9 @@ public class NodeManager implements ContainerManagementProtocol { RegisterNodeManagerRequest request = recordFactory .newRecordInstance(RegisterNodeManagerRequest.class); request.setHttpPort(httpPort); - request.setNodeId(this.nodeId); request.setResource(capability); request.setNodeId(this.nodeId); + request.setNMVersion(YarnVersionInfo.getVersion()); resourceTrackerService.registerNodeManager(request); this.schedulerNode = new FiCaSchedulerNode(rmContext.getRMNodes().get( this.nodeId), false); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java index 6026ef9ec52..81e2a81fe8a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java @@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent; import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; +import org.apache.hadoop.yarn.util.YarnVersionInfo; import org.junit.After; import org.junit.Test; @@ -248,6 +249,59 @@ public class TestResourceTrackerService { checkDecommissionedNMCount(rm, ++initialMetricCount); } + @Test + public void testNodeRegistrationSuccess() throws Exception { + writeToHostsFile("host2"); + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile + .getAbsolutePath()); + rm = new MockRM(conf); + rm.start(); + + ResourceTrackerService resourceTrackerService = rm.getResourceTrackerService(); + RegisterNodeManagerRequest req = Records.newRecord( + RegisterNodeManagerRequest.class); + NodeId nodeId = NodeId.newInstance("host2", 1234); + Resource capability = BuilderUtils.newResource(1024, 1); + req.setResource(capability); + 
req.setNodeId(nodeId); + req.setHttpPort(1234); + req.setNMVersion(YarnVersionInfo.getVersion()); + // trying to register a invalid node. + RegisterNodeManagerResponse response = resourceTrackerService.registerNodeManager(req); + Assert.assertEquals(NodeAction.NORMAL,response.getNodeAction()); + } + + @Test + public void testNodeRegistrationVersionLessThanRM() throws Exception { + writeToHostsFile("host2"); + Configuration conf = new Configuration(); + conf.set(YarnConfiguration.RM_NODES_INCLUDE_FILE_PATH, hostFile + .getAbsolutePath()); + conf.set(YarnConfiguration.RM_NODEMANAGER_MINIMUM_VERSION,"EqualToRM" ); + rm = new MockRM(conf); + rm.start(); + String nmVersion = "1.9.9"; + + ResourceTrackerService resourceTrackerService = rm.getResourceTrackerService(); + RegisterNodeManagerRequest req = Records.newRecord( + RegisterNodeManagerRequest.class); + NodeId nodeId = NodeId.newInstance("host2", 1234); + Resource capability = BuilderUtils.newResource(1024, 1); + req.setResource(capability); + req.setNodeId(nodeId); + req.setHttpPort(1234); + req.setNMVersion(nmVersion); + // trying to register a invalid node. + RegisterNodeManagerResponse response = resourceTrackerService.registerNodeManager(req); + Assert.assertEquals(NodeAction.SHUTDOWN,response.getNodeAction()); + Assert.assertTrue("Diagnostic message did not contain: 'Disallowed NodeManager " + + "Version "+ nmVersion + ", is less than the minimum version'", + response.getDiagnosticsMessage().contains("Disallowed NodeManager Version " + + nmVersion + ", is less than the minimum version ")); + + } + @Test public void testNodeRegistrationFailure() throws Exception { writeToHostsFile("host1"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImpl.java deleted file mode 100644 index e69d867e0f4..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptImpl.java +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt; - -import junit.framework.Assert; -import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; -import org.apache.hadoop.yarn.conf.YarnConfiguration; -import org.apache.hadoop.yarn.event.Dispatcher; -import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRegistrationEvent; - -import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; -import org.junit.Test; -import org.mockito.Mockito; - -public class TestRMAppAttemptImpl { - - private void testTrackingUrl(String url, boolean unmanaged) { - ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance - (ApplicationId.newInstance(1, 2), 1); - EventHandler handler = Mockito.mock(EventHandler.class); - Dispatcher dispatcher = Mockito.mock(Dispatcher.class); - Mockito.when(dispatcher.getEventHandler()).thenReturn(handler); - RMContext rmContext = Mockito.mock(RMContext.class); - Mockito.when(rmContext.getDispatcher()).thenReturn(dispatcher); - - ApplicationSubmissionContext appContext = - Mockito.mock(ApplicationSubmissionContext.class); - Mockito.when(appContext.getUnmanagedAM()).thenReturn(unmanaged); - - RMAppAttemptImpl attempt = new RMAppAttemptImpl(attemptId, rmContext, null, - null, appContext, new YarnConfiguration(), null); - RMAppAttemptRegistrationEvent event = - Mockito.mock(RMAppAttemptRegistrationEvent.class); - Mockito.when(event.getHost()).thenReturn("h"); - Mockito.when(event.getRpcport()).thenReturn(0); - Mockito.when(event.getTrackingurl()).thenReturn(url); - new RMAppAttemptImpl.AMRegisteredTransition().transition(attempt, event); - if (unmanaged) { - Assert.assertEquals(url, attempt.getTrackingUrl()); - } else { - Assert.assertNotSame(url, attempt.getTrackingUrl()); - Assert.assertTrue(attempt.getTrackingUrl().contains( - ProxyUriUtils.PROXY_SERVLET_NAME)); - Assert.assertTrue(attempt.getTrackingUrl().contains( - attemptId.getApplicationId().toString())); - } - } - - @Test - public void testTrackingUrlUnmanagedAM() { - testTrackingUrl("http://foo:8000/x", true); - } - - @Test - public void testTrackingUrlManagedAM() { - testTrackingUrl("bar:8000/x", false); - } -} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index 056589a8e3f..a878be11dd8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -30,14 +30,18 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.net.URI; +import java.net.URISyntaxException; import java.util.Arrays; import java.util.Collection; import java.util.Collections; 
import java.util.List; +import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod; @@ -85,8 +89,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSec import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM; import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager; import org.apache.hadoop.yarn.server.utils.BuilderUtils; +import org.apache.hadoop.yarn.server.webproxy.ProxyUriUtils; import org.apache.hadoop.yarn.webapp.util.WebAppUtils; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; @@ -261,8 +267,22 @@ public class TestRMAppAttemptTransitions { private String getProxyUrl(RMAppAttempt appAttempt) { - return pjoin(RM_WEBAPP_ADDR, "proxy", - appAttempt.getAppAttemptId().getApplicationId(), ""); + String url = null; + try { + URI trackingUri = + StringUtils.isEmpty(appAttempt.getOriginalTrackingUrl()) ? null : + ProxyUriUtils + .getUriFromAMUrl(appAttempt.getOriginalTrackingUrl()); + String proxy = WebAppUtils.getProxyHostAndPort(conf); + URI proxyUri = ProxyUriUtils.getUriFromAMUrl(proxy); + URI result = ProxyUriUtils.getProxyUri(trackingUri, proxyUri, + appAttempt.getAppAttemptId().getApplicationId()); + url = result.toASCIIString().substring( + HttpConfig.getSchemePrefix().length()); + } catch (URISyntaxException ex) { + Assert.fail(); + } + return url; } /** @@ -448,9 +468,9 @@ public class TestRMAppAttemptTransitions { assertEquals(container, applicationAttempt.getMasterContainer()); assertEquals(host, applicationAttempt.getHost()); assertEquals(rpcPort, applicationAttempt.getRpcPort()); - assertEquals(trackingUrl, applicationAttempt.getOriginalTrackingUrl()); + verifyUrl(trackingUrl, applicationAttempt.getOriginalTrackingUrl()); if (unmanagedAM) { - assertEquals("oldtrackingurl", applicationAttempt.getTrackingUrl()); + verifyUrl(trackingUrl, applicationAttempt.getTrackingUrl()); } else { assertEquals(getProxyUrl(applicationAttempt), applicationAttempt.getTrackingUrl()); @@ -468,7 +488,7 @@ public class TestRMAppAttemptTransitions { assertEquals(RMAppAttemptState.FINISHING, applicationAttempt.getAppAttemptState()); assertEquals(diagnostics, applicationAttempt.getDiagnostics()); - assertEquals(trackingUrl, applicationAttempt.getOriginalTrackingUrl()); + verifyUrl(trackingUrl, applicationAttempt.getOriginalTrackingUrl()); assertEquals(getProxyUrl(applicationAttempt), applicationAttempt.getTrackingUrl()); assertEquals(container, applicationAttempt.getMasterContainer()); @@ -487,9 +507,9 @@ public class TestRMAppAttemptTransitions { assertEquals(RMAppAttemptState.FINISHED, applicationAttempt.getAppAttemptState()); assertEquals(diagnostics, applicationAttempt.getDiagnostics()); - assertEquals(trackingUrl, applicationAttempt.getOriginalTrackingUrl()); + verifyUrl(trackingUrl, applicationAttempt.getOriginalTrackingUrl()); if (unmanagedAM) { - assertEquals("mytrackingurl", applicationAttempt.getTrackingUrl()); + verifyUrl(trackingUrl, applicationAttempt.getTrackingUrl()); } else { assertEquals(getProxyUrl(applicationAttempt), @@ -603,9 +623,7 @@ public class TestRMAppAttemptTransitions 
{ trackingUrl, diagnostics); } - - @Test - public void testUnmanagedAMSuccess() { + private void testUnmanagedAMSuccess(String url) { unmanagedAM = true; when(submissionContext.getUnmanagedAM()).thenReturn(true); // submit AM and check it goes to LAUNCHED state @@ -615,7 +633,7 @@ public class TestRMAppAttemptTransitions { applicationAttempt.getAppAttemptId()); // launch AM - runApplicationAttempt(null, "host", 8042, "oldtrackingurl", true); + runApplicationAttempt(null, "host", 8042, url, true); // complete a container applicationAttempt.handle(new RMAppAttemptContainerAcquiredEvent( @@ -623,13 +641,12 @@ public class TestRMAppAttemptTransitions { applicationAttempt.handle(new RMAppAttemptContainerFinishedEvent( applicationAttempt.getAppAttemptId(), mock(ContainerStatus.class))); // complete AM - String trackingUrl = "mytrackingurl"; String diagnostics = "Successful"; FinalApplicationStatus finalStatus = FinalApplicationStatus.SUCCEEDED; applicationAttempt.handle(new RMAppAttemptUnregistrationEvent( - applicationAttempt.getAppAttemptId(), trackingUrl, finalStatus, + applicationAttempt.getAppAttemptId(), url, finalStatus, diagnostics)); - testAppAttemptFinishedState(null, finalStatus, trackingUrl, diagnostics, 1, + testAppAttemptFinishedState(null, finalStatus, url, diagnostics, 1, true); } @@ -824,12 +841,42 @@ public class TestRMAppAttemptTransitions { "Killed by user"); } + @Test + public void testTrackingUrlUnmanagedAM() { + testUnmanagedAMSuccess("oldTrackingUrl"); + } @Test - public void testNoTrackingUrl() { + public void testEmptyTrackingUrlUnmanagedAM() { + testUnmanagedAMSuccess(""); + } + + @Test + public void testNullTrackingUrlUnmanagedAM() { + testUnmanagedAMSuccess(null); + } + + @Test + public void testManagedAMWithTrackingUrl() { + testTrackingUrlManagedAM("theTrackingUrl"); + } + + @Test + public void testManagedAMWithEmptyTrackingUrl() { + testTrackingUrlManagedAM(""); + } + + @Test + public void testManagedAMWithNullTrackingUrl() { + testTrackingUrlManagedAM(null); + } + + private void testTrackingUrlManagedAM(String url) { Container amContainer = allocateApplicationAttempt(); launchApplicationAttempt(amContainer); - runApplicationAttempt(amContainer, "host", 8042, "", false); + runApplicationAttempt(amContainer, "host", 8042, url, false); + unregisterApplicationAttempt(amContainer, + FinalApplicationStatus.SUCCEEDED, url, "Successful"); } @Test @@ -927,4 +974,12 @@ public class TestRMAppAttemptTransitions { } } } + + private void verifyUrl(String url1, String url2) { + if (url1 == null || url1.trim().isEmpty()) { + assertEquals("N/A", url2); + } else { + assertEquals(url1, url2); + } + } }
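
The functional heart of the HADOOP-9998 change above is a DNS-to-switch cache that can be invalidated either wholesale (reloadCachedMappings()) or only for a named subset of hosts (reloadCachedMappings(List names)), which is what DatanodeManager now uses when a registration arrives with an invalid network location. The following self-contained Java sketch is a simplified stand-in, not the Hadoop classes themselves; the class name and host names are hypothetical, and it only mirrors the caching pattern the patch introduces.

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

// Simplified stand-in for CachedDNSToSwitchMapping-style behavior (illustration only).
public class PartialCacheReloadSketch {

  private final Map<String, String> cache = new HashMap<>();

  // Pretend resolver: every uncached name resolves to /default-rack and is cached.
  String resolve(String name) {
    return cache.computeIfAbsent(name, n -> "/default-rack");
  }

  // Pre-HADOOP-9998 behavior: drop every cached entry.
  void reloadCachedMappings() {
    cache.clear();
  }

  // Behavior added by the patch: drop only the named entries.
  void reloadCachedMappings(List<String> names) {
    for (String name : names) {
      cache.remove(name);
    }
  }

  public static void main(String[] args) {
    PartialCacheReloadSketch mapping = new PartialCacheReloadSketch();
    mapping.resolve("host1.example.com");
    mapping.resolve("host2.example.com");

    // Mirrors DatanodeManager: on a bad registration, invalidate only the
    // offending node's IP address and hostnames, leaving other entries cached.
    mapping.reloadCachedMappings(Arrays.asList("10.0.0.1", "host1.example.com"));

    System.out.println(mapping.resolve("host1.example.com")); // resolved again after invalidation
    System.out.println(mapping.resolve("host2.example.com")); // served from the untouched cache
  }
}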
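
YARN-819 above adds an RM-side gate that refuses NodeManagers older than a configured minimum (NONE to disable the check, EqualToRM to require at least the ResourceManager's own version, or an explicit version string). The sketch below is a standalone illustration of that decision under the assumption of a naive dotted-version comparator; the real patch delegates the comparison to org.apache.hadoop.util.VersionUtil.compareVersions, and the class and method names here are hypothetical.

public class MinimumVersionCheckSketch {

  // Naive dotted-version comparison used only for this illustration; the patch
  // itself relies on Hadoop's VersionUtil rather than this helper.
  static int compareVersions(String a, String b) {
    String[] as = a.split("\\."), bs = b.split("\\.");
    for (int i = 0; i < Math.max(as.length, bs.length); i++) {
      int ai = i < as.length ? Integer.parseInt(as[i].replaceAll("\\D.*$", "")) : 0;
      int bi = i < bs.length ? Integer.parseInt(bs[i].replaceAll("\\D.*$", "")) : 0;
      if (ai != bi) {
        return Integer.compare(ai, bi);
      }
    }
    return 0;
  }

  // Mirrors the ResourceTrackerService check: true means the registering
  // NodeManager should receive a SHUTDOWN response.
  static boolean isDisallowed(String minimumNodeManagerVersion,
                              String rmVersion,
                              String nodeManagerVersion) {
    if (minimumNodeManagerVersion.equals("NONE")) {
      return false; // version checking disabled
    }
    String minimum = minimumNodeManagerVersion.equals("EqualToRM")
        ? rmVersion // require at least the RM's own version
        : minimumNodeManagerVersion;
    return nodeManagerVersion == null
        || compareVersions(nodeManagerVersion, minimum) < 0;
  }

  public static void main(String[] args) {
    System.out.println(isDisallowed("EqualToRM", "2.3.0", "1.9.9")); // true  -> SHUTDOWN
    System.out.println(isDisallowed("EqualToRM", "2.3.0", "2.3.0")); // false -> NORMAL
    System.out.println(isDisallowed("NONE", "2.3.0", null));         // false -> check disabled
  }
}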