diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 3bf92ec500e..1176a21cfaa 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -511,6 +511,9 @@ Release 2.6.0 - UNRELEASED HADOOP-10373 create tools/hadoop-amazon for aws/EMR support (stevel) + HADOOP-11074. Move s3-related FS connector code to hadoop-aws (David S. + Wang via Colin Patrick McCabe) + OPTIMIZATIONS HADOOP-10838. Byte array native checksumming. (James Thomas via todd) @@ -787,7 +790,10 @@ Release 2.6.0 - UNRELEASED HADOOP-11085. Excessive logging by org.apache.hadoop.util.Progress when value is NaN (Mit Desai via jlowe) -Release 2.5.1 - UNRELEASED + HADOOP-11083. After refactoring of HTTP proxyuser to common, doAs param is + case sensitive. (tucu) + +Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java index 37474e9ae16..64a562254b7 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationFilter.java @@ -188,7 +188,8 @@ static String getDoAs(HttpServletRequest request) { UTF8_CHARSET); if (list != null) { for (NameValuePair nv : list) { - if (DelegationTokenAuthenticatedURL.DO_AS.equals(nv.getName())) { + if (DelegationTokenAuthenticatedURL.DO_AS. 
+ equalsIgnoreCase(nv.getName())) { return nv.getValue(); } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java index 118abff2a56..189a334ce25 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/token/delegation/web/TestWebDelegationToken.java @@ -795,6 +795,23 @@ public void testProxyUser() throws Exception { jetty.start(); final URL url = new URL(getJettyURL() + "/foo/bar"); + // proxyuser using raw HTTP, verifying doAs is case insensitive + String strUrl = String.format("%s?user.name=%s&doas=%s", + url.toExternalForm(), FOO_USER, OK_USER); + HttpURLConnection conn = + (HttpURLConnection) new URL(strUrl).openConnection(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + List ret = IOUtils.readLines(conn.getInputStream()); + Assert.assertEquals(1, ret.size()); + Assert.assertEquals(OK_USER, ret.get(0)); + strUrl = String.format("%s?user.name=%s&DOAS=%s", url.toExternalForm(), + FOO_USER, OK_USER); + conn = (HttpURLConnection) new URL(strUrl).openConnection(); + Assert.assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + ret = IOUtils.readLines(conn.getInputStream()); + Assert.assertEquals(1, ret.size()); + Assert.assertEquals(OK_USER, ret.get(0)); + UserGroupInformation ugi = UserGroupInformation.createRemoteUser(FOO_USER); ugi.doAs(new PrivilegedExceptionAction() { @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index c82c9fd6989..d5bf64298e8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -777,7 +777,10 @@ Release 
2.6.0 - UNRELEASED HDFS-7042. Upgrade fails for Windows HA cluster due to file locks held during rename in JournalNode. (cnauroth) -Release 2.5.1 - UNRELEASED + HDFS-7051. TestDataNodeRollingUpgrade#isBlockFileInPrevious assumes Unix file + path separator. (cnauroth) + +Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java index befb298e3b6..7fd8398a28a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeRollingUpgrade.java @@ -157,7 +157,9 @@ private void ensureTrashRestored(File blockFile, File trashFile) } private boolean isBlockFileInPrevious(File blockFile) { - Pattern blockFilePattern = Pattern.compile("^(.*/current/.*/)(current)(/.*)$"); + Pattern blockFilePattern = Pattern.compile(String.format( + "^(.*%1$scurrent%1$s.*%1$s)(current)(%1$s.*)$", + Pattern.quote(File.separator))); Matcher matcher = blockFilePattern.matcher(blockFile.toString()); String previousFileName = matcher.replaceFirst("$1" + "previous" + "$3"); return ((new File(previousFileName)).exists()); @@ -355,7 +357,7 @@ public void testWithLayoutChangeAndFinalize() throws Exception { assertTrue(isBlockFileInPrevious(blockFiles[1])); assertFalse(isTrashRootPresent()); - // Rollback and ensure that neither block file exists in trash or previous. + // Finalize and ensure that neither block file exists in trash or previous. 
finalizeRollingUpgrade(); assertFalse(isTrashRootPresent()); assertFalse(isBlockFileInPrevious(blockFiles[0])); diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 0c065c56941..f1435d2dba2 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -282,7 +282,7 @@ Release 2.6.0 - UNRELEASED MAPREDUCE-6070. yarn.app.am.resource.mb/cpu-vcores affects uber mode but is not documented (Tsuyoshi OZAWA via jlowe) -Release 2.5.1 - UNRELEASED +Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/CheckpointAMPreemptionPolicy.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/CheckpointAMPreemptionPolicy.java index 57e0bce1ff7..ce4ec7179f4 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/CheckpointAMPreemptionPolicy.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/preemption/CheckpointAMPreemptionPolicy.java @@ -194,7 +194,7 @@ public void preempt(Context ctxt, PreemptionMessage preemptionRequests) { Collections.sort(listOfCont, new Comparator() { @Override public int compare(final Container o1, final Container o2) { - return o2.getId().getId() - o1.getId().getId(); + return o2.getId().compareTo(o1.getId()); } }); diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index eac9f3c73c4..bb73dc703f9 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -73,6 +73,9 @@ Release 2.6.0 - UNRELEASED YARN-2440. Enabled Nodemanagers to limit the aggregate cpu usage across all containers to a preconfigured limit. 
(Varun Vasudev via vinodkv) + YARN-2033. Merging generic-history into the Timeline Store + (Zhijie Shen via junping_du) + IMPROVEMENTS YARN-2197. Add a link to YARN CHANGES.txt in the left side of doc @@ -205,6 +208,12 @@ Release 2.6.0 - UNRELEASED YARN-2448. Changed ApplicationMasterProtocol to expose RM-recognized resource types to the AMs. (Varun Vasudev via vinodkv) + YARN-2538. Added logs when RM sends roll-overed AMRMToken to AM. (Xuan Gong + via zjshen) + + YARN-2229. Changed the integer field of ContainerId to be long type. + (Tsuyoshi OZAWA via jianhe) + OPTIMIZATIONS BUG FIXES @@ -326,7 +335,12 @@ Release 2.6.0 - UNRELEASED YARN-2534. FairScheduler: Potential integer overflow calculating totalMaxShare. (Zhihai Xu via kasha) -Release 2.5.1 - UNRELEASED + YARN-2541. Fixed ResourceManagerRest.apt.vm table syntax error. (jianhe) + + YARN-2484. FileSystemRMStateStore#readFile/writeFile should close + FSData(In|Out)putStream in final block (Tsuyoshi OZAWA via jlowe) + +Release 2.5.1 - 2014-09-05 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java index 031573f3c36..53c18ae3cef 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptReport.java @@ -51,14 +51,15 @@ public abstract class ApplicationAttemptReport { @Unstable public static ApplicationAttemptReport newInstance( ApplicationAttemptId applicationAttemptId, String host, int rpcPort, - String url, String diagnostics, YarnApplicationAttemptState state, - ContainerId amContainerId) { + String url, String oUrl, String diagnostics, + YarnApplicationAttemptState 
state, ContainerId amContainerId) { ApplicationAttemptReport report = Records.newRecord(ApplicationAttemptReport.class); report.setApplicationAttemptId(applicationAttemptId); report.setHost(host); report.setRpcPort(rpcPort); report.setTrackingUrl(url); + report.setOriginalTrackingUrl(oUrl); report.setDiagnostics(diagnostics); report.setYarnApplicationAttemptState(state); report.setAMContainerId(amContainerId); @@ -135,6 +136,19 @@ public abstract void setYarnApplicationAttemptState( @Unstable public abstract void setTrackingUrl(String url); + /** + * Get the original tracking url for the application attempt. + * + * @return original tracking url for the application attempt + */ + @Public + @Unstable + public abstract String getOriginalTrackingUrl(); + + @Private + @Unstable + public abstract void setOriginalTrackingUrl(String oUrl); + /** * Get the ApplicationAttemptId of this attempt of the * application diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java index 321052b062b..d03a439d5f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java @@ -41,9 +41,9 @@ public abstract class ContainerId implements Comparable{ @Private @Unstable public static ContainerId newInstance(ApplicationAttemptId appAttemptId, - int containerId) { + long containerId) { ContainerId id = Records.newRecord(ContainerId.class); - id.setId(containerId); + id.setContainerId(containerId); id.setApplicationAttemptId(appAttemptId); id.build(); return id; @@ -74,16 +74,28 @@ public static ContainerId newInstance(ApplicationAttemptId appAttemptId, protected abstract void setApplicationAttemptId(ApplicationAttemptId atId); 
/** - * Get the identifier of the ContainerId. - * @return identifier of the ContainerId + * Get the lower 32 bits of identifier of the ContainerId, + * which doesn't include epoch. Note that this method will be marked as + * deprecated, so please use getContainerId instead. + * @return lower 32 bits of identifier of the ContainerId */ @Public @Stable public abstract int getId(); + /** + * Get the identifier of the ContainerId. Upper 24 bits are + * reserved as epoch of cluster, and lower 40 bits are reserved as + * sequential number of containers. + * @return identifier of the ContainerId + */ + @Public + @Unstable + public abstract long getContainerId(); + @Private @Unstable - protected abstract void setId(int id); + protected abstract void setContainerId(long id); // TODO: fail the app submission if attempts are more than 10 or something @@ -109,14 +121,12 @@ public NumberFormat initialValue() { return fmt; } }; - + @Override public int hashCode() { - // Generated by eclipse. - final int prime = 435569; - int result = 7507; - result = prime * result + getId(); - result = prime * result + getApplicationAttemptId().hashCode(); + // Generated by IntelliJ IDEA 13.1. 
+ int result = (int) (getContainerId() ^ (getContainerId() >>> 32)); + result = 31 * result + getApplicationAttemptId().hashCode(); return result; } @@ -131,7 +141,7 @@ public boolean equals(Object obj) { ContainerId other = (ContainerId) obj; if (!this.getApplicationAttemptId().equals(other.getApplicationAttemptId())) return false; - if (this.getId() != other.getId()) + if (this.getContainerId() != other.getContainerId()) return false; return true; } @@ -140,12 +150,12 @@ public boolean equals(Object obj) { public int compareTo(ContainerId other) { if (this.getApplicationAttemptId().compareTo( other.getApplicationAttemptId()) == 0) { - return this.getId() - other.getId(); + return Long.valueOf(getContainerId()) + .compareTo(Long.valueOf(other.getContainerId())); } else { return this.getApplicationAttemptId().compareTo( other.getApplicationAttemptId()); } - } @Override @@ -159,8 +169,8 @@ public String toString() { sb.append( appAttemptIdAndEpochFormat.get().format( getApplicationAttemptId().getAttemptId())).append("_"); - sb.append(containerIdFormat.get().format(0x3fffff & getId())); - int epoch = getId() >> 22; + sb.append(containerIdFormat.get().format(0xffffffffffL & getContainerId())); + long epoch = getContainerId() >> 40; if (epoch > 0) { sb.append("_").append(appAttemptIdAndEpochFormat.get().format(epoch)); } @@ -177,12 +187,12 @@ public static ContainerId fromString(String containerIdStr) { } try { ApplicationAttemptId appAttemptID = toApplicationAttemptId(it); - int id = Integer.parseInt(it.next()); - int epoch = 0; + long id = Long.parseLong(it.next()); + long epoch = 0; if (it.hasNext()) { epoch = Integer.parseInt(it.next()); } - int cid = (epoch << 22) | id; + long cid = (epoch << 40) | id; ContainerId containerId = ContainerId.newInstance(appAttemptID, cid); return containerId; } catch (NumberFormatException n) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 7c71a1717f1..43f510de376 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -316,6 +316,19 @@ public class YarnConfiguration extends Configuration { public static final int DEFAULT_RM_HISTORY_WRITER_MULTI_THREADED_DISPATCHER_POOL_SIZE = 10; + /** + * The setting that controls whether yarn system metrics is published on the + * timeline server or not by RM. + */ + public static final String RM_SYSTEM_METRICS_PUBLISHER_ENABLED = + RM_PREFIX + "system-metrics-publisher.enabled"; + public static final boolean DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED = false; + + public static final String RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE = + RM_PREFIX + "system-metrics-publisher.dispatcher.pool-size"; + public static final int DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE = + 10; + //Delegation token related keys public static final String DELEGATION_KEY_UPDATE_INTERVAL_KEY = RM_PREFIX + "delegation.key.update-interval"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 7e7f21b6a44..d84de4aee58 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -50,7 +50,7 @@ message ApplicationAttemptIdProto { message ContainerIdProto { optional ApplicationIdProto app_id = 1; optional ApplicationAttemptIdProto app_attempt_id = 2; - optional int32 id = 3; + optional int64 id = 3; } message ResourceProto { @@ -202,6 +202,7 @@ message ApplicationAttemptReportProto { optional string diagnostics = 5 
[default = "N/A"]; optional YarnApplicationAttemptStateProto yarn_application_attempt_state = 6; optional ContainerIdProto am_container_id = 7; + optional string original_tracking_url = 8; } enum NodeStateProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java index 72cb1b1684c..120538c8ec1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/ProtocolHATestBase.java @@ -676,7 +676,7 @@ public List createFakeQueueUserACLInfoList() { public ApplicationAttemptReport createFakeApplicationAttemptReport() { return ApplicationAttemptReport.newInstance( - createFakeApplicationAttemptId(), "localhost", 0, "", "", + createFakeApplicationAttemptId(), "localhost", 0, "", "", "", YarnApplicationAttemptState.RUNNING, createFakeContainerId()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java index 797faa51031..d3c182b9cf4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAHSClient.java @@ -346,6 +346,7 @@ private void createAppReports() { "host", 124, "url", + "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( @@ -357,6 +358,7 @@ private void createAppReports() { "host", 124, "url", + "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, 
ContainerId.newInstance( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index 6407f7a1089..8259893af37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -457,6 +457,7 @@ private List createAppReports() { "host", 124, "url", + "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( @@ -467,6 +468,7 @@ private List createAppReports() { "host", 124, "url", + "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java index 47fa5ec6d0c..1a593d207f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java @@ -133,7 +133,7 @@ public void testGetApplicationAttemptReport() throws Exception { ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance( applicationId, 1); ApplicationAttemptReport attemptReport = ApplicationAttemptReport - .newInstance(attemptId, "host", 124, "url", "diagnostics", + .newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( attemptId, 1)); when( @@ -169,11 +169,11 @@ public void testGetApplicationAttempts() throws Exception { ApplicationAttemptId 
attemptId1 = ApplicationAttemptId.newInstance( applicationId, 2); ApplicationAttemptReport attemptReport = ApplicationAttemptReport - .newInstance(attemptId, "host", 124, "url", "diagnostics", + .newInstance(attemptId, "host", 124, "url", "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( attemptId, 1)); ApplicationAttemptReport attemptReport1 = ApplicationAttemptReport - .newInstance(attemptId1, "host", 124, "url", "diagnostics", + .newInstance(attemptId1, "host", 124, "url", "oUrl", "diagnostics", YarnApplicationAttemptState.FINISHED, ContainerId.newInstance( attemptId1, 1)); List reports = new ArrayList(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java index 89999872e0a..c3c0221ce78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationAttemptReportPBImpl.java @@ -87,6 +87,15 @@ public String getTrackingUrl() { return p.getTrackingUrl(); } + @Override + public String getOriginalTrackingUrl() { + ApplicationAttemptReportProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasOriginalTrackingUrl()) { + return null; + } + return p.getOriginalTrackingUrl(); + } + @Override public String getDiagnostics() { ApplicationAttemptReportProtoOrBuilder p = viaProto ? 
proto : builder; @@ -160,6 +169,16 @@ public void setTrackingUrl(String url) { builder.setTrackingUrl(url); } + @Override + public void setOriginalTrackingUrl(String oUrl) { + maybeInitBuilder(); + if (oUrl == null) { + builder.clearOriginalTrackingUrl(); + return; + } + builder.setOriginalTrackingUrl(oUrl); + } + @Override public void setDiagnostics(String diagnostics) { maybeInitBuilder(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java index 9be829fb490..629e95afc0f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java @@ -49,12 +49,18 @@ public ContainerIdProto getProto() { @Override public int getId() { + Preconditions.checkNotNull(proto); + return (int) proto.getId(); + } + + @Override + public long getContainerId() { Preconditions.checkNotNull(proto); return proto.getId(); } @Override - protected void setId(int id) { + protected void setContainerId(long id) { Preconditions.checkNotNull(builder); builder.setId((id)); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java index 8f885bb586f..6ee0d1c9c32 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/view/HtmlBlock.java @@ -21,6 +21,7 @@ import java.io.PrintWriter; import 
org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.webapp.MimeType; import org.apache.hadoop.yarn.webapp.SubView; import org.apache.hadoop.yarn.webapp.WebAppException; @@ -81,4 +82,15 @@ public void renderPartial() { * @param html the block to render */ protected abstract void render(Block html); + + protected UserGroupInformation getCallerUGI() { + // Check for the authorization. + String remoteUser = request().getRemoteUser(); + UserGroupInformation callerUGI = null; + if (remoteUser != null) { + callerUGI = UserGroupInformation.createRemoteUser(remoteUser); + } + return callerUGI; + } + } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index 04e458cd722..55e5bcaf5d8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -615,24 +615,32 @@ org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy - - Number of worker threads that write the history data. - yarn.resourcemanager.history-writer.multi-threaded-dispatcher.pool-size - 10 - - The class to use as the configuration provider. If org.apache.hadoop.yarn.LocalConfigurationProvider is used, the local configuration will be loaded. If org.apache.hadoop.yarn.FileSystemBasedConfigurationProvider is used, the configuration which will be loaded should be uploaded to remote File system first. - > + yarn.resourcemanager.configuration.provider-class org.apache.hadoop.yarn.LocalConfigurationProvider + + The setting that controls whether yarn system metrics is + published on the timeline server or not by RM. 
+ yarn.resourcemanager.system-metrics-publisher.enabled + false + + + + Number of worker threads that send the yarn system metrics + data. + yarn.resourcemanager.system-metrics-publisher.dispatcher.pool-size + 10 + + The hostname of the NM. @@ -1319,38 +1327,6 @@ /etc/krb5.keytab - - Indicate to ResourceManager as well as clients whether - history-service is enabled or not. If enabled, ResourceManager starts - recording historical data that ApplicationHistory service can consume. - Similarly, clients can redirect to the history service when applications - finish if this is enabled. - yarn.timeline-service.generic-application-history.enabled - false - - - - URI pointing to the location of the FileSystem path where - the history will be persisted. This must be supplied when using - org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore - as the value for yarn.timeline-service.generic-application-history.store-class - yarn.timeline-service.generic-application-history.fs-history-store.uri - ${hadoop.tmp.dir}/yarn/timeline/generic-history - - - - T-file compression types used to compress history data. 
- yarn.timeline-service.generic-application-history.fs-history-store.compression-type - none - - - - Store class name for history store, defaulting to file - system store - yarn.timeline-service.generic-application-history.store-class - org.apache.hadoop.yarn.server.applicationhistoryservice.FileSystemApplicationHistoryStore - - The interval that the yarn client library uses to poll the diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java index 8baf244dda2..c11fd9624a8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java @@ -54,18 +54,26 @@ public void testContainerId() { long ts = System.currentTimeMillis(); ContainerId c6 = newContainerId(36473, 4365472, ts, 25645811); Assert.assertEquals("container_10_0001_01_000001", c1.toString()); - Assert.assertEquals(c1, - ContainerId.fromString("container_10_0001_01_000001")); - Assert.assertEquals(479987, 0x003fffff & c6.getId()); - Assert.assertEquals(6, c6.getId() >> 22); - Assert.assertEquals("container_" + ts + "_36473_4365472_479987_06", + Assert.assertEquals(25645811, 0xffffffffffL & c6.getContainerId()); + Assert.assertEquals(0, c6.getContainerId() >> 40); + Assert.assertEquals("container_" + ts + "_36473_4365472_25645811", c6.toString()); - Assert.assertEquals(c6, - ContainerId.fromString("container_" + ts + "_36473_4365472_479987_06")); + + ContainerId c7 = newContainerId(36473, 4365472, ts, 4298334883325L); + Assert.assertEquals(999799999997L, 0xffffffffffL & c7.getContainerId()); + Assert.assertEquals(3, c7.getContainerId() >> 40); + Assert.assertEquals( + "container_" + ts + "_36473_4365472_999799999997_03", c7.toString()); + + 
ContainerId c8 = newContainerId(36473, 4365472, ts, 844424930131965L); + Assert.assertEquals(1099511627773L, 0xffffffffffL & c8.getContainerId()); + Assert.assertEquals(767, c8.getContainerId() >> 40); + Assert.assertEquals( + "container_" + ts + "_36473_4365472_1099511627773_767", c8.toString()); } public static ContainerId newContainerId(int appId, int appAttemptId, - long timestamp, int containerId) { + long timestamp, long containerId) { ApplicationId applicationId = ApplicationId.newInstance(timestamp, appId); ApplicationAttemptId applicationAttemptId = ApplicationAttemptId.newInstance(applicationId, appAttemptId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java index 3f4147c4189..f7e7fe439ee 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/TestConverterUtils.java @@ -59,9 +59,24 @@ public void testContainerId() throws URISyntaxException { public void testContainerIdWithEpoch() throws URISyntaxException { ContainerId id = TestContainerId.newContainerId(0, 0, 0, 25645811); String cid = ConverterUtils.toString(id); - assertEquals("container_0_0000_00_479987_06", cid); + assertEquals("container_0_0000_00_25645811", cid); ContainerId gen = ConverterUtils.toContainerId(cid); assertEquals(gen.toString(), id.toString()); + + long ts = System.currentTimeMillis(); + ContainerId id2 = + TestContainerId.newContainerId(36473, 4365472, ts, 4298334883325L); + String cid2 = ConverterUtils.toString(id2); + assertEquals("container_" + ts + "_36473_4365472_999799999997_03", cid2); + ContainerId gen2 = ConverterUtils.toContainerId(cid2); + assertEquals(gen2.toString(), id2.toString()); + + ContainerId id3 
= + TestContainerId.newContainerId(36473, 4365472, ts, 844424930131965L); + String cid3 = ConverterUtils.toString(id3); + assertEquals("container_" + ts + "_36473_4365472_1099511627773_767", cid3); + ContainerId gen3 = ConverterUtils.toContainerId(cid3); + assertEquals(gen3.toString(), id3.toString()); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java index b56a5951bba..803dc01d1d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerImpl.java @@ -163,7 +163,7 @@ private ApplicationAttemptReport convertToApplicationAttemptReport( ApplicationAttemptHistoryData appAttemptHistory) { return ApplicationAttemptReport.newInstance( appAttemptHistory.getApplicationAttemptId(), appAttemptHistory.getHost(), - appAttemptHistory.getRPCPort(), appAttemptHistory.getTrackingURL(), + appAttemptHistory.getRPCPort(), appAttemptHistory.getTrackingURL(), null, appAttemptHistory.getDiagnosticsInfo(), appAttemptHistory.getYarnApplicationAttemptState(), appAttemptHistory.getMasterContainerId()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java new file mode 100644 index 00000000000..23b92184c5d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java @@ -0,0 +1,511 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.io.IOException; +import java.util.EnumSet; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.service.AbstractService; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerExitStatus; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; +import org.apache.hadoop.yarn.exceptions.ApplicationAttemptNotFoundException; +import org.apache.hadoop.yarn.exceptions.ApplicationNotFoundException; +import org.apache.hadoop.yarn.exceptions.ContainerNotFoundException; +import org.apache.hadoop.yarn.exceptions.YarnException; +import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants; +import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants; +import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants; +import org.apache.hadoop.yarn.server.timeline.NameValuePair; +import 
org.apache.hadoop.yarn.server.timeline.TimelineDataManager; +import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field; +import org.apache.hadoop.yarn.util.ConverterUtils; +import org.apache.hadoop.yarn.webapp.util.WebAppUtils; + +public class ApplicationHistoryManagerOnTimelineStore extends AbstractService + implements + ApplicationHistoryManager { + + private TimelineDataManager timelineDataManager; + private String serverHttpAddress; + + public ApplicationHistoryManagerOnTimelineStore( + TimelineDataManager timelineDataManager) { + super(ApplicationHistoryManagerOnTimelineStore.class.getName()); + this.timelineDataManager = timelineDataManager; + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + serverHttpAddress = WebAppUtils.getHttpSchemePrefix(conf) + + WebAppUtils.getAHSWebAppURLWithoutScheme(conf); + super.serviceInit(conf); + } + + @Override + public ApplicationReport getApplication(ApplicationId appId) + throws YarnException, IOException { + return getApplication(appId, ApplicationReportField.ALL); + } + + @Override + public Map getAllApplications() + throws YarnException, IOException { + TimelineEntities entities = timelineDataManager.getEntities( + ApplicationMetricsConstants.ENTITY_TYPE, null, null, null, null, + null, null, Long.MAX_VALUE, EnumSet.allOf(Field.class), + UserGroupInformation.getLoginUser()); + Map apps = + new HashMap(); + if (entities != null && entities.getEntities() != null) { + for (TimelineEntity entity : entities.getEntities()) { + ApplicationReport app = + generateApplicationReport(entity, ApplicationReportField.ALL); + apps.put(app.getApplicationId(), app); + } + } + return apps; + } + + @Override + public Map + getApplicationAttempts(ApplicationId appId) + throws YarnException, IOException { + TimelineEntities entities = timelineDataManager.getEntities( + AppAttemptMetricsConstants.ENTITY_TYPE, + new NameValuePair( + AppAttemptMetricsConstants.PARENT_PRIMARY_FILTER, appId + 
.toString()), null, null, null, null, null, + Long.MAX_VALUE, EnumSet.allOf(Field.class), + UserGroupInformation.getLoginUser()); + Map appAttempts = + new HashMap(); + if (entities != null && entities.getEntities() != null) { + for (TimelineEntity entity : entities.getEntities()) { + ApplicationAttemptReport appAttempt = + convertToApplicationAttemptReport(entity); + appAttempts.put(appAttempt.getApplicationAttemptId(), appAttempt); + } + } else { + // It is likely that the attemtps are not found due to non-existing + // application. In this case, we need to throw ApplicationNotFoundException. + getApplication(appId, ApplicationReportField.NONE); + } + return appAttempts; + } + + @Override + public ApplicationAttemptReport getApplicationAttempt( + ApplicationAttemptId appAttemptId) throws YarnException, IOException { + TimelineEntity entity = timelineDataManager.getEntity( + AppAttemptMetricsConstants.ENTITY_TYPE, + appAttemptId.toString(), EnumSet.allOf(Field.class), + UserGroupInformation.getLoginUser()); + if (entity == null) { + // Will throw ApplicationNotFoundException first + getApplication(appAttemptId.getApplicationId(), ApplicationReportField.NONE); + throw new ApplicationAttemptNotFoundException( + "The entity for application attempt " + appAttemptId + + " doesn't exist in the timeline store"); + } else { + return convertToApplicationAttemptReport(entity); + } + } + + @Override + public ContainerReport getContainer(ContainerId containerId) + throws YarnException, IOException { + ApplicationReport app = getApplication( + containerId.getApplicationAttemptId().getApplicationId(), + ApplicationReportField.USER); + TimelineEntity entity = timelineDataManager.getEntity( + ContainerMetricsConstants.ENTITY_TYPE, + containerId.toString(), EnumSet.allOf(Field.class), + UserGroupInformation.getLoginUser()); + if (entity == null) { + throw new ContainerNotFoundException( + "The entity for container " + containerId + + " doesn't exist in the timeline store"); + } 
else { + return convertToContainerReport(entity, serverHttpAddress, app.getUser()); + } + } + + @Override + public ContainerReport getAMContainer(ApplicationAttemptId appAttemptId) + throws YarnException, IOException { + ApplicationAttemptReport appAttempt = getApplicationAttempt(appAttemptId); + return getContainer(appAttempt.getAMContainerId()); + } + + @Override + public Map getContainers( + ApplicationAttemptId appAttemptId) throws YarnException, IOException { + ApplicationReport app = getApplication( + appAttemptId.getApplicationId(), ApplicationReportField.USER); + TimelineEntities entities = timelineDataManager.getEntities( + ContainerMetricsConstants.ENTITY_TYPE, + new NameValuePair( + ContainerMetricsConstants.PARENT_PRIMARIY_FILTER, + appAttemptId.toString()), null, null, null, + null, null, Long.MAX_VALUE, EnumSet.allOf(Field.class), + UserGroupInformation.getLoginUser()); + Map containers = + new HashMap(); + if (entities != null && entities.getEntities() != null) { + for (TimelineEntity entity : entities.getEntities()) { + ContainerReport container = + convertToContainerReport(entity, serverHttpAddress, app.getUser()); + containers.put(container.getContainerId(), container); + } + } + return containers; + } + + private static ApplicationReport convertToApplicationReport( + TimelineEntity entity, ApplicationReportField field) { + String user = null; + String queue = null; + String name = null; + String type = null; + long createdTime = 0; + long finishedTime = 0; + ApplicationAttemptId latestApplicationAttemptId = null; + String diagnosticsInfo = null; + FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED; + YarnApplicationState state = null; + if (field == ApplicationReportField.NONE) { + return ApplicationReport.newInstance( + ConverterUtils.toApplicationId(entity.getEntityId()), + latestApplicationAttemptId, user, queue, name, null, -1, null, state, + diagnosticsInfo, null, createdTime, finishedTime, finalStatus, null, + null, 1.0F, 
type, null); + } + Map entityInfo = entity.getOtherInfo(); + if (entityInfo != null) { + if (entityInfo.containsKey(ApplicationMetricsConstants.USER_ENTITY_INFO)) { + user = + entityInfo.get(ApplicationMetricsConstants.USER_ENTITY_INFO) + .toString(); + } + if (field == ApplicationReportField.USER) { + return ApplicationReport.newInstance( + ConverterUtils.toApplicationId(entity.getEntityId()), + latestApplicationAttemptId, user, queue, name, null, -1, null, state, + diagnosticsInfo, null, createdTime, finishedTime, finalStatus, null, + null, 1.0F, type, null); + } + if (entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) { + queue = + entityInfo.get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO) + .toString(); + } + if (entityInfo.containsKey(ApplicationMetricsConstants.NAME_ENTITY_INFO)) { + name = + entityInfo.get(ApplicationMetricsConstants.NAME_ENTITY_INFO) + .toString(); + } + if (entityInfo.containsKey(ApplicationMetricsConstants.TYPE_ENTITY_INFO)) { + type = + entityInfo.get(ApplicationMetricsConstants.TYPE_ENTITY_INFO) + .toString(); + } + } + List events = entity.getEvents(); + if (events != null) { + for (TimelineEvent event : events) { + if (event.getEventType().equals( + ApplicationMetricsConstants.CREATED_EVENT_TYPE)) { + createdTime = event.getTimestamp(); + } else if (event.getEventType().equals( + ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) { + finishedTime = event.getTimestamp(); + Map eventInfo = event.getEventInfo(); + if (eventInfo == null) { + continue; + } + if (eventInfo + .containsKey(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO)) { + latestApplicationAttemptId = + ConverterUtils + .toApplicationAttemptId( + eventInfo + .get( + ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO) + .toString()); + } + if (eventInfo + .containsKey(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) { + diagnosticsInfo = + eventInfo.get( + ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO) + .toString(); 
+ } + if (eventInfo + .containsKey(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO)) { + finalStatus = + FinalApplicationStatus.valueOf(eventInfo.get( + ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO) + .toString()); + } + if (eventInfo + .containsKey(ApplicationMetricsConstants.STATE_EVENT_INFO)) { + state = + YarnApplicationState.valueOf(eventInfo.get( + ApplicationMetricsConstants.STATE_EVENT_INFO).toString()); + } + } + } + } + return ApplicationReport.newInstance( + ConverterUtils.toApplicationId(entity.getEntityId()), + latestApplicationAttemptId, user, queue, name, null, -1, null, state, + diagnosticsInfo, null, createdTime, finishedTime, finalStatus, null, + null, 1.0F, type, null); + } + + private static ApplicationAttemptReport convertToApplicationAttemptReport( + TimelineEntity entity) { + String host = null; + int rpcPort = -1; + ContainerId amContainerId = null; + String trackingUrl = null; + String originalTrackingUrl = null; + String diagnosticsInfo = null; + YarnApplicationAttemptState state = null; + List events = entity.getEvents(); + if (events != null) { + for (TimelineEvent event : events) { + if (event.getEventType().equals( + AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE)) { + Map eventInfo = event.getEventInfo(); + if (eventInfo == null) { + continue; + } + if (eventInfo.containsKey(AppAttemptMetricsConstants.HOST_EVENT_INFO)) { + host = + eventInfo.get(AppAttemptMetricsConstants.HOST_EVENT_INFO) + .toString(); + } + if (eventInfo + .containsKey(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO)) { + rpcPort = (Integer) eventInfo.get( + AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO); + } + if (eventInfo + .containsKey(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)) { + amContainerId = + ConverterUtils.toContainerId(eventInfo.get( + AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO) + .toString()); + } + } else if (event.getEventType().equals( + AppAttemptMetricsConstants.FINISHED_EVENT_TYPE)) { + Map eventInfo = 
event.getEventInfo(); + if (eventInfo == null) { + continue; + } + if (eventInfo + .containsKey(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)) { + trackingUrl = + eventInfo.get( + AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO) + .toString(); + } + if (eventInfo + .containsKey(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)) { + originalTrackingUrl = + eventInfo + .get( + AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO) + .toString(); + } + if (eventInfo + .containsKey(AppAttemptMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) { + diagnosticsInfo = + eventInfo.get( + AppAttemptMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO) + .toString(); + } + if (eventInfo + .containsKey(AppAttemptMetricsConstants.STATE_EVENT_INFO)) { + state = + YarnApplicationAttemptState.valueOf(eventInfo.get( + AppAttemptMetricsConstants.STATE_EVENT_INFO) + .toString()); + } + } + } + } + return ApplicationAttemptReport.newInstance( + ConverterUtils.toApplicationAttemptId(entity.getEntityId()), + host, rpcPort, trackingUrl, originalTrackingUrl, diagnosticsInfo, + state, amContainerId); + } + + private static ContainerReport convertToContainerReport( + TimelineEntity entity, String serverHttpAddress, String user) { + int allocatedMem = 0; + int allocatedVcore = 0; + String allocatedHost = null; + int allocatedPort = -1; + int allocatedPriority = 0; + long createdTime = 0; + long finishedTime = 0; + String diagnosticsInfo = null; + int exitStatus = ContainerExitStatus.INVALID; + ContainerState state = null; + Map entityInfo = entity.getOtherInfo(); + if (entityInfo != null) { + if (entityInfo + .containsKey(ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO)) { + allocatedMem = (Integer) entityInfo.get( + ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO); + } + if (entityInfo + .containsKey(ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO)) { + allocatedVcore = (Integer) entityInfo.get( + ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO); + } + 
if (entityInfo + .containsKey(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO)) { + allocatedHost = + entityInfo + .get(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO) + .toString(); + } + if (entityInfo + .containsKey(ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO)) { + allocatedPort = (Integer) entityInfo.get( + ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO); + } + if (entityInfo + .containsKey(ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO)) { + allocatedPriority = (Integer) entityInfo.get( + ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO); + } + } + List events = entity.getEvents(); + if (events != null) { + for (TimelineEvent event : events) { + if (event.getEventType().equals( + ContainerMetricsConstants.CREATED_EVENT_TYPE)) { + createdTime = event.getTimestamp(); + } else if (event.getEventType().equals( + ContainerMetricsConstants.FINISHED_EVENT_TYPE)) { + finishedTime = event.getTimestamp(); + Map eventInfo = event.getEventInfo(); + if (eventInfo == null) { + continue; + } + if (eventInfo + .containsKey(ContainerMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)) { + diagnosticsInfo = + eventInfo.get( + ContainerMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO) + .toString(); + } + if (eventInfo + .containsKey(ContainerMetricsConstants.EXIT_STATUS_EVENT_INFO)) { + exitStatus = (Integer) eventInfo.get( + ContainerMetricsConstants.EXIT_STATUS_EVENT_INFO); + } + if (eventInfo + .containsKey(ContainerMetricsConstants.STATE_EVENT_INFO)) { + state = + ContainerState.valueOf(eventInfo.get( + ContainerMetricsConstants.STATE_EVENT_INFO).toString()); + } + } + } + } + NodeId allocatedNode = NodeId.newInstance(allocatedHost, allocatedPort); + ContainerId containerId = + ConverterUtils.toContainerId(entity.getEntityId()); + String logUrl = WebAppUtils.getAggregatedLogURL( + serverHttpAddress, + allocatedNode.toString(), + containerId.toString(), + containerId.toString(), + user); + return ContainerReport.newInstance( + 
ConverterUtils.toContainerId(entity.getEntityId()), + Resource.newInstance(allocatedMem, allocatedVcore), + NodeId.newInstance(allocatedHost, allocatedPort), + Priority.newInstance(allocatedPriority), + createdTime, finishedTime, diagnosticsInfo, logUrl, exitStatus, state); + } + + private ApplicationReport generateApplicationReport(TimelineEntity entity, + ApplicationReportField field) throws YarnException, IOException { + ApplicationReport app = convertToApplicationReport(entity, field); + if (field == ApplicationReportField.ALL && + app != null && app.getCurrentApplicationAttemptId() != null) { + ApplicationAttemptReport appAttempt = + getApplicationAttempt(app.getCurrentApplicationAttemptId()); + if (appAttempt != null) { + app.setHost(appAttempt.getHost()); + app.setRpcPort(appAttempt.getRpcPort()); + app.setTrackingUrl(appAttempt.getTrackingUrl()); + app.setOriginalTrackingUrl(appAttempt.getOriginalTrackingUrl()); + } + } + return app; + } + + private ApplicationReport getApplication(ApplicationId appId, + ApplicationReportField field) throws YarnException, IOException { + TimelineEntity entity = timelineDataManager.getEntity( + ApplicationMetricsConstants.ENTITY_TYPE, + appId.toString(), EnumSet.allOf(Field.class), + UserGroupInformation.getLoginUser()); + if (entity == null) { + throw new ApplicationNotFoundException("The entity for application " + + appId + " doesn't exist in the timeline store"); + } else { + return generateApplicationReport(entity, field); + } + } + + private static enum ApplicationReportField { + ALL, // retrieve all the fields + NONE, // retrieve no fields + USER // retrieve user info only + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java index 158f2e6c5f1..3c3d37a9668 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryServer.java @@ -170,7 +170,16 @@ public static void main(String[] args) { private ApplicationHistoryManager createApplicationHistoryManager( Configuration conf) { - return new ApplicationHistoryManagerImpl(); + // Backward compatibility: + // APPLICATION_HISTORY_STORE is neither null nor empty, it means that the + // user has enabled it explicitly. + if (conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE) == null || + conf.get(YarnConfiguration.APPLICATION_HISTORY_STORE).length() == 0) { + return new ApplicationHistoryManagerOnTimelineStore(timelineDataManager); + } else { + LOG.warn("The filesystem based application history store is deprecated."); + return new ApplicationHistoryManagerImpl(); + } } private TimelineStore createTimelineStore( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java index a2d91406871..7840dd18660 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/FileSystemApplicationHistoryStore.java @@ -117,7 +117,8 @@ protected FileSystem getFileSystem(Path path, Configuration conf) throws Excepti @Override public void serviceInit(Configuration conf) throws Exception { Path fsWorkingPath = - new Path(conf.get(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI)); + new Path(conf.get(YarnConfiguration.FS_APPLICATION_HISTORY_STORE_URI, + conf.get("hadoop.tmp.dir") + "/yarn/timeline/generic-history")); rootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); try { fs = getFileSystem(fsWorkingPath, conf); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java index b94711c3c61..099d5ef5888 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/timeline/MemoryTimelineStore.java @@ -49,6 +49,8 @@ * they may encounter reading and writing history data in different memory * store. * + * The methods are synchronized to avoid concurrent modification on the memory. 
+ * */ @Private @Unstable @@ -65,7 +67,7 @@ public MemoryTimelineStore() { } @Override - public TimelineEntities getEntities(String entityType, Long limit, + public synchronized TimelineEntities getEntities(String entityType, Long limit, Long windowStart, Long windowEnd, String fromId, Long fromTs, NameValuePair primaryFilter, Collection secondaryFilters, EnumSet fields) { @@ -148,7 +150,7 @@ public TimelineEntities getEntities(String entityType, Long limit, } @Override - public TimelineEntity getEntity(String entityId, String entityType, + public synchronized TimelineEntity getEntity(String entityId, String entityType, EnumSet fieldsToRetrieve) { if (fieldsToRetrieve == null) { fieldsToRetrieve = EnumSet.allOf(Field.class); @@ -162,7 +164,7 @@ public TimelineEntity getEntity(String entityId, String entityType, } @Override - public TimelineEvents getEntityTimelines(String entityType, + public synchronized TimelineEvents getEntityTimelines(String entityType, SortedSet entityIds, Long limit, Long windowStart, Long windowEnd, Set eventTypes) { @@ -209,7 +211,7 @@ public TimelineEvents getEntityTimelines(String entityType, } @Override - public TimelinePutResponse put(TimelineEntities data) { + public synchronized TimelinePutResponse put(TimelineEntities data) { TimelinePutResponse response = new TimelinePutResponse(); for (TimelineEntity entity : data.getEntities()) { EntityIdentifier entityId = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java index 7b5b74b6755..ad2907b6031 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryClientService.java @@ -64,6 +64,7 @@ public void setup() { WebAppUtils.getAHSWebAppURLWithoutScheme(config) + "/applicationhistory/logs/localhost:0/container_0_0001_01_000001/" + "container_0_0001_01_000001/test user"; + config.setBoolean(YarnConfiguration.APPLICATION_HISTORY_ENABLED, true); config.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE, MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class); historyServer.init(config); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java new file mode 100644 index 00000000000..65eafc61a34 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/test/java/org/apache/hadoop/yarn/server/applicationhistoryservice/TestApplicationHistoryManagerOnTimelineStore.java @@ -0,0 +1,317 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.applicationhistoryservice; + +import java.util.Collection; +import java.util.HashMap; +import java.util.Map; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEntities; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants; +import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants; +import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants; +import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore; +import org.apache.hadoop.yarn.server.timeline.TimelineDataManager; +import 
org.apache.hadoop.yarn.server.timeline.TimelineStore; +import org.apache.hadoop.yarn.server.timeline.security.TimelineACLsManager; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestApplicationHistoryManagerOnTimelineStore { + + private static ApplicationHistoryManagerOnTimelineStore historyManager; + private static final int SCALE = 5; + + @BeforeClass + public static void setup() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + TimelineStore store = new MemoryTimelineStore(); + prepareTimelineStore(store); + TimelineACLsManager aclsManager = new TimelineACLsManager(conf); + TimelineDataManager dataManager = + new TimelineDataManager(store, aclsManager); + historyManager = new ApplicationHistoryManagerOnTimelineStore(dataManager); + historyManager.init(conf); + historyManager.start(); + } + + @AfterClass + public static void tearDown() { + if (historyManager != null) { + historyManager.stop(); + } + } + + private static void prepareTimelineStore(TimelineStore store) + throws Exception { + for (int i = 1; i <= SCALE; ++i) { + TimelineEntities entities = new TimelineEntities(); + ApplicationId appId = ApplicationId.newInstance(0, i); + entities.addEntity(createApplicationTimelineEntity(appId)); + store.put(entities); + for (int j = 1; j <= SCALE; ++j) { + entities = new TimelineEntities(); + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(appId, j); + entities.addEntity(createAppAttemptTimelineEntity(appAttemptId)); + store.put(entities); + for (int k = 1; k <= SCALE; ++k) { + entities = new TimelineEntities(); + ContainerId containerId = ContainerId.newInstance(appAttemptId, k); + entities.addEntity(createContainerEntity(containerId)); + store.put(entities); + } + } + } + } + + @Test + public void testGetApplicationReport() throws Exception { + ApplicationId appId = ApplicationId.newInstance(0, 1); + ApplicationReport app = 
historyManager.getApplication(appId); + Assert.assertNotNull(app); + Assert.assertEquals(appId, app.getApplicationId()); + Assert.assertEquals("test app", app.getName()); + Assert.assertEquals("test app type", app.getApplicationType()); + Assert.assertEquals("test user", app.getUser()); + Assert.assertEquals("test queue", app.getQueue()); + Assert.assertEquals(Integer.MAX_VALUE + 2L, app.getStartTime()); + Assert.assertEquals(Integer.MAX_VALUE + 3L, app.getFinishTime()); + Assert.assertTrue(Math.abs(app.getProgress() - 1.0F) < 0.0001); + Assert.assertEquals("test host", app.getHost()); + Assert.assertEquals(-100, app.getRpcPort()); + Assert.assertEquals("test tracking url", app.getTrackingUrl()); + Assert.assertEquals("test original tracking url", + app.getOriginalTrackingUrl()); + Assert.assertEquals("test diagnostics info", app.getDiagnostics()); + Assert.assertEquals(FinalApplicationStatus.UNDEFINED, + app.getFinalApplicationStatus()); + Assert.assertEquals(YarnApplicationState.FINISHED, + app.getYarnApplicationState()); + } + + @Test + public void testGetApplicationAttemptReport() throws Exception { + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1); + ApplicationAttemptReport appAttempt = + historyManager.getApplicationAttempt(appAttemptId); + Assert.assertNotNull(appAttempt); + Assert.assertEquals(appAttemptId, appAttempt.getApplicationAttemptId()); + Assert.assertEquals(ContainerId.newInstance(appAttemptId, 1), + appAttempt.getAMContainerId()); + Assert.assertEquals("test host", appAttempt.getHost()); + Assert.assertEquals(-100, appAttempt.getRpcPort()); + Assert.assertEquals("test tracking url", appAttempt.getTrackingUrl()); + Assert.assertEquals("test original tracking url", + appAttempt.getOriginalTrackingUrl()); + Assert.assertEquals("test diagnostics info", appAttempt.getDiagnostics()); + Assert.assertEquals(YarnApplicationAttemptState.FINISHED, + appAttempt.getYarnApplicationAttemptState()); + } 
+ + @Test + public void testGetContainerReport() throws Exception { + ContainerId containerId = + ContainerId.newInstance(ApplicationAttemptId.newInstance( + ApplicationId.newInstance(0, 1), 1), 1); + ContainerReport container = historyManager.getContainer(containerId); + Assert.assertNotNull(container); + Assert.assertEquals(Integer.MAX_VALUE + 1L, container.getCreationTime()); + Assert.assertEquals(Integer.MAX_VALUE + 2L, container.getFinishTime()); + Assert.assertEquals(Resource.newInstance(-1, -1), + container.getAllocatedResource()); + Assert.assertEquals(NodeId.newInstance("test host", -100), + container.getAssignedNode()); + Assert.assertEquals(Priority.UNDEFINED, container.getPriority()); + Assert + .assertEquals("test diagnostics info", container.getDiagnosticsInfo()); + Assert.assertEquals(ContainerState.COMPLETE, container.getContainerState()); + Assert.assertEquals(-1, container.getContainerExitStatus()); + Assert.assertEquals("http://0.0.0.0:8188/applicationhistory/logs/" + + "test host:-100/container_0_0001_01_000001/" + + "container_0_0001_01_000001/test user", container.getLogUrl()); + } + + @Test + public void testGetApplications() throws Exception { + Collection apps = + historyManager.getAllApplications().values(); + Assert.assertNotNull(apps); + Assert.assertEquals(SCALE, apps.size()); + } + + @Test + public void testGetApplicationAttempts() throws Exception { + Collection appAttempts = + historyManager.getApplicationAttempts(ApplicationId.newInstance(0, 1)) + .values(); + Assert.assertNotNull(appAttempts); + Assert.assertEquals(SCALE, appAttempts.size()); + } + + @Test + public void testGetContainers() throws Exception { + Collection containers = + historyManager + .getContainers( + ApplicationAttemptId.newInstance( + ApplicationId.newInstance(0, 1), 1)).values(); + Assert.assertNotNull(containers); + Assert.assertEquals(SCALE, containers.size()); + } + + @Test + public void testGetAMContainer() throws Exception { + ApplicationAttemptId 
appAttemptId = + ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1); + ContainerReport container = historyManager.getAMContainer(appAttemptId); + Assert.assertNotNull(container); + Assert.assertEquals(appAttemptId, container.getContainerId() + .getApplicationAttemptId()); + } + + private static TimelineEntity createApplicationTimelineEntity( + ApplicationId appId) { + TimelineEntity entity = new TimelineEntity(); + entity.setEntityType(ApplicationMetricsConstants.ENTITY_TYPE); + entity.setEntityId(appId.toString()); + Map entityInfo = new HashMap(); + entityInfo.put(ApplicationMetricsConstants.NAME_ENTITY_INFO, "test app"); + entityInfo.put(ApplicationMetricsConstants.TYPE_ENTITY_INFO, + "test app type"); + entityInfo.put(ApplicationMetricsConstants.USER_ENTITY_INFO, "test user"); + entityInfo.put(ApplicationMetricsConstants.QUEUE_ENTITY_INFO, "test queue"); + entityInfo.put(ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO, + Integer.MAX_VALUE + 1L); + entity.setOtherInfo(entityInfo); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType(ApplicationMetricsConstants.CREATED_EVENT_TYPE); + tEvent.setTimestamp(Integer.MAX_VALUE + 2L); + entity.addEvent(tEvent); + tEvent = new TimelineEvent(); + tEvent.setEventType( + ApplicationMetricsConstants.FINISHED_EVENT_TYPE); + tEvent.setTimestamp(Integer.MAX_VALUE + 3L); + Map eventInfo = new HashMap(); + eventInfo.put(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO, + "test diagnostics info"); + eventInfo.put(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO, + FinalApplicationStatus.UNDEFINED.toString()); + eventInfo.put(ApplicationMetricsConstants.STATE_EVENT_INFO, + YarnApplicationState.FINISHED.toString()); + eventInfo.put(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO, + ApplicationAttemptId.newInstance(appId, 1)); + tEvent.setEventInfo(eventInfo); + entity.addEvent(tEvent); + return entity; + } + + private static TimelineEntity createAppAttemptTimelineEntity( + 
ApplicationAttemptId appAttemptId) { + TimelineEntity entity = new TimelineEntity(); + entity.setEntityType(AppAttemptMetricsConstants.ENTITY_TYPE); + entity.setEntityId(appAttemptId.toString()); + entity.addPrimaryFilter(AppAttemptMetricsConstants.PARENT_PRIMARY_FILTER, + appAttemptId.getApplicationId().toString()); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType(AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE); + tEvent.setTimestamp(Integer.MAX_VALUE + 1L); + Map eventInfo = new HashMap(); + eventInfo.put(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO, + "test tracking url"); + eventInfo.put(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO, + "test original tracking url"); + eventInfo.put(AppAttemptMetricsConstants.HOST_EVENT_INFO, "test host"); + eventInfo.put(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO, -100); + eventInfo.put(AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO, + ContainerId.newInstance(appAttemptId, 1)); + tEvent.setEventInfo(eventInfo); + entity.addEvent(tEvent); + tEvent = new TimelineEvent(); + tEvent.setEventType(AppAttemptMetricsConstants.FINISHED_EVENT_TYPE); + tEvent.setTimestamp(Integer.MAX_VALUE + 2L); + eventInfo = new HashMap(); + eventInfo.put(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO, + "test tracking url"); + eventInfo.put(AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO, + "test original tracking url"); + eventInfo.put(AppAttemptMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO, + "test diagnostics info"); + eventInfo.put(AppAttemptMetricsConstants.FINAL_STATUS_EVENT_INFO, + FinalApplicationStatus.UNDEFINED.toString()); + eventInfo.put(AppAttemptMetricsConstants.STATE_EVENT_INFO, + YarnApplicationAttemptState.FINISHED.toString()); + tEvent.setEventInfo(eventInfo); + entity.addEvent(tEvent); + return entity; + } + + private static TimelineEntity createContainerEntity(ContainerId containerId) { + TimelineEntity entity = new TimelineEntity(); + 
entity.setEntityType(ContainerMetricsConstants.ENTITY_TYPE); + entity.setEntityId(containerId.toString()); + entity.addPrimaryFilter(ContainerMetricsConstants.PARENT_PRIMARIY_FILTER, + containerId.getApplicationAttemptId().toString()); + Map entityInfo = new HashMap(); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO, -1); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO, -1); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO, + "test host"); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO, -100); + entityInfo + .put(ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO, -1); + entity.setOtherInfo(entityInfo); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType(ContainerMetricsConstants.CREATED_EVENT_TYPE); + tEvent.setTimestamp(Integer.MAX_VALUE + 1L); + entity.addEvent(tEvent); + ; + tEvent = new TimelineEvent(); + tEvent.setEventType(ContainerMetricsConstants.FINISHED_EVENT_TYPE); + tEvent.setTimestamp(Integer.MAX_VALUE + 2L); + Map eventInfo = new HashMap(); + eventInfo.put(ContainerMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO, + "test diagnostics info"); + eventInfo.put(ContainerMetricsConstants.EXIT_STATUS_EVENT_INFO, -1); + eventInfo.put(ContainerMetricsConstants.STATE_EVENT_INFO, + ContainerState.COMPLETE.toString()); + tEvent.setEventInfo(eventInfo); + entity.addEvent(tEvent); + return entity; + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java index 78ae0dd935a..0e2ffdfd909 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/api/ApplicationContext.java @@ -21,17 +21,14 @@ import java.io.IOException; import java.util.Map; -import org.apache.hadoop.classification.InterfaceAudience.Public; -import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerReport; +import org.apache.hadoop.yarn.exceptions.YarnException; -@Public -@Unstable public interface ApplicationContext { /** * This method returns Application {@link ApplicationReport} for the specified @@ -40,21 +37,21 @@ public interface ApplicationContext { * @param appId * * @return {@link ApplicationReport} for the ApplicationId. + * @throws YarnException * @throws IOException */ - @Public - @Unstable - ApplicationReport getApplication(ApplicationId appId) throws IOException; + ApplicationReport getApplication(ApplicationId appId) + throws YarnException, IOException; /** * This method returns all Application {@link ApplicationReport}s * * @return map of {@link ApplicationId} to {@link ApplicationReport}s. + * @throws YarnException * @throws IOException */ - @Public - @Unstable - Map getAllApplications() throws IOException; + Map getAllApplications() + throws YarnException, IOException; /** * Application can have multiple application attempts @@ -64,12 +61,11 @@ public interface ApplicationContext { * @param appId * * @return all {@link ApplicationAttemptReport}s for the Application. 
+ * @throws YarnException * @throws IOException */ - @Public - @Unstable Map getApplicationAttempts( - ApplicationId appId) throws IOException; + ApplicationId appId) throws YarnException, IOException; /** * This method returns {@link ApplicationAttemptReport} for specified @@ -78,12 +74,11 @@ Map getApplicationAttempts( * @param appAttemptId * {@link ApplicationAttemptId} * @return {@link ApplicationAttemptReport} for ApplicationAttemptId + * @throws YarnException * @throws IOException */ - @Public - @Unstable ApplicationAttemptReport getApplicationAttempt( - ApplicationAttemptId appAttemptId) throws IOException; + ApplicationAttemptId appAttemptId) throws YarnException, IOException; /** * This method returns {@link ContainerReport} for specified @@ -92,11 +87,11 @@ ApplicationAttemptReport getApplicationAttempt( * @param containerId * {@link ContainerId} * @return {@link ContainerReport} for ContainerId + * @throws YarnException * @throws IOException */ - @Public - @Unstable - ContainerReport getContainer(ContainerId containerId) throws IOException; + ContainerReport getContainer(ContainerId containerId) + throws YarnException, IOException; /** * This method returns {@link ContainerReport} for specified @@ -105,12 +100,11 @@ ApplicationAttemptReport getApplicationAttempt( * @param appAttemptId * {@link ApplicationAttemptId} * @return {@link ContainerReport} for ApplicationAttemptId + * @throws YarnException * @throws IOException */ - @Public - @Unstable ContainerReport getAMContainer(ApplicationAttemptId appAttemptId) - throws IOException; + throws YarnException, IOException; /** * This method returns Map of {@link ContainerId} to {@link ContainerReport} @@ -120,10 +114,9 @@ ContainerReport getAMContainer(ApplicationAttemptId appAttemptId) * {@link ApplicationAttemptId} * @return Map of {@link ContainerId} to {@link ContainerReport} for * ApplicationAttemptId + * @throws YarnException * @throws IOException */ - @Public - @Unstable Map getContainers( - 
ApplicationAttemptId appAttemptId) throws IOException; + ApplicationAttemptId appAttemptId) throws YarnException, IOException; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/AppAttemptMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/AppAttemptMetricsConstants.java new file mode 100644 index 00000000000..a7809cfe2c1 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/AppAttemptMetricsConstants.java @@ -0,0 +1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.metrics; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +@Private +@Unstable +public class AppAttemptMetricsConstants { + + public static final String ENTITY_TYPE = + "YARN_APPLICATION_ATTEMPT"; + + public static final String REGISTERED_EVENT_TYPE = + "YARN_APPLICATION_ATTEMPT_REGISTERED"; + + public static final String FINISHED_EVENT_TYPE = + "YARN_APPLICATION_ATTEMPT_FINISHED"; + + public static final String PARENT_PRIMARY_FILTER = + "YARN_APPLICATION_ATTEMPT_PARENT"; + + public static final String TRACKING_URL_EVENT_INFO = + "YARN_APPLICATION_ATTEMPT_TRACKING_URL"; + + public static final String ORIGINAL_TRACKING_URL_EVENT_INFO = + "YARN_APPLICATION_ATTEMPT_ORIGINAL_TRACKING_URL"; + + public static final String HOST_EVENT_INFO = + "YARN_APPLICATION_ATTEMPT_HOST"; + + public static final String RPC_PORT_EVENT_INFO = + "YARN_APPLICATION_ATTEMPT_RPC_PORT"; + + public static final String MASTER_CONTAINER_EVENT_INFO = + "YARN_APPLICATION_ATTEMPT_MASTER_CONTAINER"; + + public static final String DIAGNOSTICS_INFO_EVENT_INFO = + "YARN_APPLICATION_ATTEMPT_DIAGNOSTICS_INFO"; + + public static final String FINAL_STATUS_EVENT_INFO = + "YARN_APPLICATION_ATTEMPT_FINAL_STATUS"; + + public static final String STATE_EVENT_INFO = + "YARN_APPLICATION_ATTEMPT_STATE"; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java new file mode 100644 index 00000000000..f6a40bd0f8e --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ApplicationMetricsConstants.java @@ -0,0 
+1,64 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.metrics; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +@Private +@Unstable +public class ApplicationMetricsConstants { + + public static final String ENTITY_TYPE = + "YARN_APPLICATION"; + + public static final String CREATED_EVENT_TYPE = + "YARN_APPLICATION_CREATED"; + + public static final String FINISHED_EVENT_TYPE = + "YARN_APPLICATION_FINISHED"; + + public static final String NAME_ENTITY_INFO = + "YARN_APPLICATION_NAME"; + + public static final String TYPE_ENTITY_INFO = + "YARN_APPLICATION_TYPE"; + + public static final String USER_ENTITY_INFO = + "YARN_APPLICATION_USER"; + + public static final String QUEUE_ENTITY_INFO = + "YARN_APPLICATION_QUEUE"; + + public static final String SUBMITTED_TIME_ENTITY_INFO = + "YARN_APPLICATION_SUBMITTED_TIME"; + + public static final String DIAGNOSTICS_INFO_EVENT_INFO = + "YARN_APPLICATION_DIAGNOSTICS_INFO"; + + public static final String FINAL_STATUS_EVENT_INFO = + "YARN_APPLICATION_FINAL_STATUS"; + + public static final String STATE_EVENT_INFO = + "YARN_APPLICATION_STATE"; + + 
public static final String LATEST_APP_ATTEMPT_EVENT_INFO = + "YARN_APPLICATION_LATEST_APP_ATTEMPT"; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java new file mode 100644 index 00000000000..8791da4f219 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/metrics/ContainerMetricsConstants.java @@ -0,0 +1,60 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.metrics; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +@Private +@Unstable +public class ContainerMetricsConstants { + + public static final String ENTITY_TYPE = "YARN_CONTAINER"; + + public static final String CREATED_EVENT_TYPE = "YARN_CONTAINER_CREATED"; + + public static final String FINISHED_EVENT_TYPE = "YARN_CONTAINER_FINISHED"; + + public static final String PARENT_PRIMARIY_FILTER = "YARN_CONTAINER_PARENT"; + + public static final String ALLOCATED_MEMORY_ENTITY_INFO = + "YARN_CONTAINER_ALLOCATED_MEMORY"; + + public static final String ALLOCATED_VCORE_ENTITY_INFO = + "YARN_CONTAINER_ALLOCATED_VCORE"; + + public static final String ALLOCATED_HOST_ENTITY_INFO = + "YARN_CONTAINER_ALLOCATED_HOST"; + + public static final String ALLOCATED_PORT_ENTITY_INFO = + "YARN_CONTAINER_ALLOCATED_PORT"; + + public static final String ALLOCATED_PRIORITY_ENTITY_INFO = + "YARN_CONTAINER_ALLOCATED_PRIORITY"; + + public static final String DIAGNOSTICS_INFO_EVENT_INFO = + "YARN_CONTAINER_DIAGNOSTICS_INFO"; + + public static final String EXIT_STATUS_EVENT_INFO = + "YARN_CONTAINER_EXIT_STATUS"; + + public static final String STATE_EVENT_INFO = + "YARN_CONTAINER_STATE"; + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index 0cfd911f83b..59db66a83c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -138,12 +138,12 @@ public static 
ApplicationId convert(long clustertimestamp, CharSequence id) { } public static ContainerId newContainerId(ApplicationAttemptId appAttemptId, - int containerId) { + long containerId) { return ContainerId.newInstance(appAttemptId, containerId); } public static ContainerId newContainerId(int appId, int appAttemptId, - long timestamp, int id) { + long timestamp, long id) { ApplicationId applicationId = newApplicationId(timestamp, appId); ApplicationAttemptId applicationAttemptId = newApplicationAttemptId( applicationId, appAttemptId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java index 1a76ecaf771..4a02892f7e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java @@ -20,12 +20,13 @@ import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ATTEMPT_ID; -import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.Collection; import org.apache.commons.lang.StringEscapeUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ContainerReport; @@ -67,10 +68,22 @@ protected void render(Block html) { return; } + final ApplicationAttemptId appAttemptIdFinal = appAttemptId; + 
UserGroupInformation callerUGI = getCallerUGI(); ApplicationAttemptReport appAttemptReport; try { - appAttemptReport = appContext.getApplicationAttempt(appAttemptId); - } catch (IOException e) { + if (callerUGI == null) { + appAttemptReport = appContext.getApplicationAttempt(appAttemptId); + } else { + appAttemptReport = callerUGI.doAs( + new PrivilegedExceptionAction () { + @Override + public ApplicationAttemptReport run() throws Exception { + return appContext.getApplicationAttempt(appAttemptIdFinal); + } + }); + } + } catch (Exception e) { String message = "Failed to read the application attempt " + appAttemptId + "."; LOG.error(message, e); @@ -108,8 +121,26 @@ protected void render(Block html) { Collection containers; try { - containers = appContext.getContainers(appAttemptId).values(); - } catch (IOException e) { + if (callerUGI == null) { + containers = appContext.getContainers(appAttemptId).values(); + } else { + containers = callerUGI.doAs( + new PrivilegedExceptionAction> () { + @Override + public Collection run() throws Exception { + return appContext.getContainers(appAttemptIdFinal).values(); + } + }); + } + } catch (RuntimeException e) { + // have this block to suppress the findbugs warning + html + .p() + ._( + "Sorry, Failed to get containers for application attempt" + attemptid + + ".")._(); + return; + } catch (Exception e) { html .p() ._( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java index 2ae495b08d8..8fa40860b82 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppBlock.java @@ -21,10 +21,11 @@ import static org.apache.hadoop.yarn.util.StringHelper.join; import static org.apache.hadoop.yarn.webapp.YarnWebParams.APPLICATION_ID; -import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.Collection; import org.apache.commons.lang.StringEscapeUtils; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAttemptReport; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -70,10 +71,22 @@ protected void render(Block html) { return; } + final ApplicationId appIDFinal = appID; + UserGroupInformation callerUGI = getCallerUGI(); ApplicationReport appReport; try { - appReport = appContext.getApplication(appID); - } catch (IOException e) { + if (callerUGI == null) { + appReport = appContext.getApplication(appID); + } else { + appReport = callerUGI.doAs( + new PrivilegedExceptionAction () { + @Override + public ApplicationReport run() throws Exception { + return appContext.getApplication(appIDFinal); + } + }); + } + } catch (Exception e) { String message = "Failed to read the application " + appID + "."; LOG.error(message, e); html.p()._(message)._(); @@ -106,8 +119,18 @@ protected void render(Block html) { Collection attempts; try { - attempts = appContext.getApplicationAttempts(appID).values(); - } catch (IOException e) { + if (callerUGI == null) { + attempts = appContext.getApplicationAttempts(appID).values(); + } else { + attempts = callerUGI.doAs( + new PrivilegedExceptionAction> () { + @Override + public Collection run() throws Exception { + return appContext.getApplicationAttempts(appIDFinal).values(); + } + }); + } + } catch (Exception e) { String message = "Failed to read the attempts of the application " + appID + "."; LOG.error(message, e); @@ -122,14 
+145,24 @@ protected void render(Block html) { ._()._().tbody(); StringBuilder attemptsTableData = new StringBuilder("[\n"); - for (ApplicationAttemptReport appAttemptReport : attempts) { + for (final ApplicationAttemptReport appAttemptReport : attempts) { AppAttemptInfo appAttempt = new AppAttemptInfo(appAttemptReport); ContainerReport containerReport; try { - containerReport = - appContext.getAMContainer(appAttemptReport + if (callerUGI == null) { + containerReport = appContext.getAMContainer(appAttemptReport .getApplicationAttemptId()); - } catch (IOException e) { + } else { + containerReport = callerUGI.doAs( + new PrivilegedExceptionAction () { + @Override + public ContainerReport run() throws Exception { + return appContext.getAMContainer(appAttemptReport + .getApplicationAttemptId()); + } + }); + } + } catch (Exception e) { String message = "Failed to read the AM container of the application attempt " + appAttemptReport.getApplicationAttemptId() + "."; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java index d4a77a8af6c..f341cf6e55d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppsBlock.java @@ -23,11 +23,12 @@ import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR; import static org.apache.hadoop.yarn.webapp.view.JQueryUI.C_PROGRESSBAR_VALUE; -import java.io.IOException; +import java.security.PrivilegedExceptionAction; import java.util.Collection; import java.util.HashSet; import org.apache.commons.lang.StringEscapeUtils; +import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.server.api.ApplicationContext; @@ -70,10 +71,21 @@ public void render(Block html) { } } + UserGroupInformation callerUGI = getCallerUGI(); Collection appReports; try { - appReports = appContext.getAllApplications().values(); - } catch (IOException e) { + if (callerUGI == null) { + appReports = appContext.getAllApplications().values(); + } else { + appReports = callerUGI.doAs( + new PrivilegedExceptionAction> () { + @Override + public Collection run() throws Exception { + return appContext.getAllApplications().values(); + } + }); + } + } catch (Exception e) { String message = "Failed to read the applications."; LOG.error(message, e); html.p()._(message)._(); @@ -86,7 +98,7 @@ public void render(Block html) { continue; } AppInfo app = new AppInfo(appReport); - String percent = String.format("%.1f", app.getProgress()); + String percent = String.format("%.1f", app.getProgress() * 100.0F); // AppID numerical value parsed by parseHadoopID in yarn.dt.plugins.js appsTableData .append("[\" { + + public SystemMetricsEvent(SystemMetricsEventType type) { + super(type); + } + + public SystemMetricsEvent(SystemMetricsEventType type, long timestamp) { + super(type, timestamp); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsEventType.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsEventType.java new file mode 100644 index 00000000000..c593f691ad6 --- /dev/null +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsEventType.java @@ -0,0 +1,34 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.metrics; + + +public enum SystemMetricsEventType { + // app events + APP_CREATED, + APP_FINISHED, + + // app attempt events + APP_ATTEMPT_REGISTERED, + APP_ATTEMPT_FINISHED, + + // container events + CONTAINER_CREATED, + CONTAINER_FINISHED +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java new file mode 100644 index 00000000000..886f57d4f57 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java @@ -0,0 +1,490 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.metrics; + +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; +import org.apache.hadoop.service.CompositeService; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; +import org.apache.hadoop.yarn.client.api.TimelineClient; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.AsyncDispatcher; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.Event; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.security.client.TimelineDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants; +import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants; +import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants; +import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import 
org.apache.hadoop.yarn.util.timeline.TimelineUtils; + +@Private +@Unstable +public class SystemMetricsPublisher extends CompositeService { + + private static final Log LOG = LogFactory + .getLog(SystemMetricsPublisher.class); + private static final int MAX_GET_TIMELINE_DELEGATION_TOKEN_ATTEMPTS = 10; + + private Dispatcher dispatcher; + private TimelineClient client; + private boolean publishSystemMetrics; + private int getTimelineDelegtionTokenAttempts = 0; + private boolean hasReceivedTimelineDelegtionToken = false; + + public SystemMetricsPublisher() { + super(SystemMetricsPublisher.class.getName()); + } + + @Override + protected void serviceInit(Configuration conf) throws Exception { + publishSystemMetrics = + conf.getBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, + YarnConfiguration.DEFAULT_TIMELINE_SERVICE_ENABLED) && + conf.getBoolean(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED, + YarnConfiguration.DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_ENABLED); + + if (publishSystemMetrics) { + client = TimelineClient.createTimelineClient(); + addIfService(client); + + dispatcher = createDispatcher(conf); + dispatcher.register(SystemMetricsEventType.class, + new ForwardingEventHandler()); + addIfService(dispatcher); + LOG.info("YARN system metrics publishing service is enabled"); + } else { + LOG.info("YARN system metrics publishing service is not enabled"); + } + super.serviceInit(conf); + } + + @SuppressWarnings("unchecked") + public void appCreated(RMApp app, long createdTime) { + if (publishSystemMetrics) { + dispatcher.getEventHandler().handle( + new ApplicationCreatedEvent( + app.getApplicationId(), + app.getName(), + app.getApplicationType(), + app.getUser(), + app.getQueue(), + app.getSubmitTime(), + createdTime)); + } + } + + @SuppressWarnings("unchecked") + public void appFinished(RMApp app, RMAppState state, long finishedTime) { + if (publishSystemMetrics) { + dispatcher.getEventHandler().handle( + new ApplicationFinishedEvent( + 
app.getApplicationId(), + app.getDiagnostics().toString(), + app.getFinalApplicationStatus(), + RMServerUtils.createApplicationState(state), + app.getCurrentAppAttempt() == null ? + null : app.getCurrentAppAttempt().getAppAttemptId(), + finishedTime)); + } + } + + @SuppressWarnings("unchecked") + public void appAttemptRegistered(RMAppAttempt appAttempt, + long registeredTime) { + if (publishSystemMetrics) { + dispatcher.getEventHandler().handle( + new AppAttemptRegisteredEvent( + appAttempt.getAppAttemptId(), + appAttempt.getHost(), + appAttempt.getRpcPort(), + appAttempt.getTrackingUrl(), + appAttempt.getOriginalTrackingUrl(), + appAttempt.getMasterContainer().getId(), + registeredTime)); + } + } + + @SuppressWarnings("unchecked") + public void appAttemptFinished(RMAppAttempt appAttempt, + RMAppAttemptState state, long finishedTime) { + if (publishSystemMetrics) { + dispatcher.getEventHandler().handle( + new AppAttemptFinishedEvent( + appAttempt.getAppAttemptId(), + appAttempt.getTrackingUrl(), + appAttempt.getOriginalTrackingUrl(), + appAttempt.getDiagnostics(), + appAttempt.getFinalApplicationStatus(), + RMServerUtils.createApplicationAttemptState(state), + finishedTime)); + } + } + + @SuppressWarnings("unchecked") + public void containerCreated(RMContainer container, long createdTime) { + if (publishSystemMetrics) { + dispatcher.getEventHandler().handle( + new ContainerCreatedEvent( + container.getContainerId(), + container.getAllocatedResource(), + container.getAllocatedNode(), + container.getAllocatedPriority(), + createdTime)); + } + } + + @SuppressWarnings("unchecked") + public void containerFinished(RMContainer container, long finishedTime) { + if (publishSystemMetrics) { + dispatcher.getEventHandler().handle( + new ContainerFinishedEvent( + container.getContainerId(), + container.getDiagnosticsInfo(), + container.getContainerExitStatus(), + container.getContainerState(), + finishedTime)); + } + } + + protected Dispatcher createDispatcher(Configuration 
conf) { + MultiThreadedDispatcher dispatcher = + new MultiThreadedDispatcher( + conf.getInt( + YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE, + YarnConfiguration.DEFAULT_RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE)); + dispatcher.setDrainEventsOnStop(); + return dispatcher; + } + + protected void handleSystemMetricsEvent( + SystemMetricsEvent event) { + switch (event.getType()) { + case APP_CREATED: + publishApplicationCreatedEvent((ApplicationCreatedEvent) event); + break; + case APP_FINISHED: + publishApplicationFinishedEvent((ApplicationFinishedEvent) event); + break; + case APP_ATTEMPT_REGISTERED: + publishAppAttemptRegisteredEvent((AppAttemptRegisteredEvent) event); + break; + case APP_ATTEMPT_FINISHED: + publishAppAttemptFinishedEvent((AppAttemptFinishedEvent) event); + break; + case CONTAINER_CREATED: + publishContainerCreatedEvent((ContainerCreatedEvent) event); + break; + case CONTAINER_FINISHED: + publishContainerFinishedEvent((ContainerFinishedEvent) event); + break; + default: + LOG.error("Unknown SystemMetricsEvent type: " + event.getType()); + } + } + + private void publishApplicationCreatedEvent(ApplicationCreatedEvent event) { + TimelineEntity entity = + createApplicationEntity(event.getApplicationId()); + Map entityInfo = new HashMap(); + entityInfo.put(ApplicationMetricsConstants.NAME_ENTITY_INFO, + event.getApplicationName()); + entityInfo.put(ApplicationMetricsConstants.TYPE_ENTITY_INFO, + event.getApplicationType()); + entityInfo.put(ApplicationMetricsConstants.USER_ENTITY_INFO, + event.getUser()); + entityInfo.put(ApplicationMetricsConstants.QUEUE_ENTITY_INFO, + event.getQueue()); + entityInfo.put(ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO, + event.getSubmittedTime()); + entity.setOtherInfo(entityInfo); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType( + ApplicationMetricsConstants.CREATED_EVENT_TYPE); + tEvent.setTimestamp(event.getTimestamp()); + entity.addEvent(tEvent); + 
putEntity(entity); + } + + private void publishApplicationFinishedEvent(ApplicationFinishedEvent event) { + TimelineEntity entity = + createApplicationEntity(event.getApplicationId()); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType( + ApplicationMetricsConstants.FINISHED_EVENT_TYPE); + tEvent.setTimestamp(event.getTimestamp()); + Map eventInfo = new HashMap(); + eventInfo.put(ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO, + event.getDiagnosticsInfo()); + eventInfo.put(ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO, + event.getFinalApplicationStatus().toString()); + eventInfo.put(ApplicationMetricsConstants.STATE_EVENT_INFO, + event.getYarnApplicationState().toString()); + if (event.getLatestApplicationAttemptId() != null) { + eventInfo.put(ApplicationMetricsConstants.LATEST_APP_ATTEMPT_EVENT_INFO, + event.getLatestApplicationAttemptId().toString()); + } + tEvent.setEventInfo(eventInfo); + entity.addEvent(tEvent); + putEntity(entity); + } + + private static TimelineEntity createApplicationEntity( + ApplicationId applicationId) { + TimelineEntity entity = new TimelineEntity(); + entity.setEntityType(ApplicationMetricsConstants.ENTITY_TYPE); + entity.setEntityId(applicationId.toString()); + return entity; + } + + private void + publishAppAttemptRegisteredEvent(AppAttemptRegisteredEvent event) { + TimelineEntity entity = + createAppAttemptEntity(event.getApplicationAttemptId()); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType( + AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE); + tEvent.setTimestamp(event.getTimestamp()); + Map eventInfo = new HashMap(); + eventInfo.put( + AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO, + event.getTrackingUrl()); + eventInfo.put( + AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO, + event.getOriginalTrackingURL()); + eventInfo.put(AppAttemptMetricsConstants.HOST_EVENT_INFO, + event.getHost()); + eventInfo.put(AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO, + 
event.getRpcPort()); + eventInfo.put( + AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO, + event.getMasterContainerId().toString()); + tEvent.setEventInfo(eventInfo); + entity.addEvent(tEvent); + putEntity(entity); + } + + private void publishAppAttemptFinishedEvent(AppAttemptFinishedEvent event) { + TimelineEntity entity = + createAppAttemptEntity(event.getApplicationAttemptId()); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType(AppAttemptMetricsConstants.FINISHED_EVENT_TYPE); + tEvent.setTimestamp(event.getTimestamp()); + Map eventInfo = new HashMap(); + eventInfo.put( + AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO, + event.getTrackingUrl()); + eventInfo.put( + AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO, + event.getOriginalTrackingURL()); + eventInfo.put(AppAttemptMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO, + event.getDiagnosticsInfo()); + eventInfo.put(AppAttemptMetricsConstants.FINAL_STATUS_EVENT_INFO, + event.getFinalApplicationStatus().toString()); + eventInfo.put(AppAttemptMetricsConstants.STATE_EVENT_INFO, + event.getYarnApplicationAttemptState().toString()); + tEvent.setEventInfo(eventInfo); + entity.addEvent(tEvent); + putEntity(entity); + } + + private static TimelineEntity createAppAttemptEntity( + ApplicationAttemptId appAttemptId) { + TimelineEntity entity = new TimelineEntity(); + entity.setEntityType( + AppAttemptMetricsConstants.ENTITY_TYPE); + entity.setEntityId(appAttemptId.toString()); + entity.addPrimaryFilter(AppAttemptMetricsConstants.PARENT_PRIMARY_FILTER, + appAttemptId.getApplicationId().toString()); + return entity; + } + + private void publishContainerCreatedEvent(ContainerCreatedEvent event) { + TimelineEntity entity = createContainerEntity(event.getContainerId()); + Map entityInfo = new HashMap(); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO, + event.getAllocatedResource().getMemory()); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO, + 
event.getAllocatedResource().getVirtualCores()); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO, + event.getAllocatedNode().getHost()); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO, + event.getAllocatedNode().getPort()); + entityInfo.put(ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO, + event.getAllocatedPriority().getPriority()); + entity.setOtherInfo(entityInfo); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType(ContainerMetricsConstants.CREATED_EVENT_TYPE); + tEvent.setTimestamp(event.getTimestamp()); + entity.addEvent(tEvent); + putEntity(entity); + } + + private void publishContainerFinishedEvent(ContainerFinishedEvent event) { + TimelineEntity entity = createContainerEntity(event.getContainerId()); + TimelineEvent tEvent = new TimelineEvent(); + tEvent.setEventType(ContainerMetricsConstants.FINISHED_EVENT_TYPE); + tEvent.setTimestamp(event.getTimestamp()); + Map eventInfo = new HashMap(); + eventInfo.put(ContainerMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO, + event.getDiagnosticsInfo()); + eventInfo.put(ContainerMetricsConstants.EXIT_STATUS_EVENT_INFO, + event.getContainerExitStatus()); + eventInfo.put(ContainerMetricsConstants.STATE_EVENT_INFO, + event.getContainerState().toString()); + tEvent.setEventInfo(eventInfo); + entity.addEvent(tEvent); + putEntity(entity); + } + + private static TimelineEntity createContainerEntity( + ContainerId containerId) { + TimelineEntity entity = new TimelineEntity(); + entity.setEntityType( + ContainerMetricsConstants.ENTITY_TYPE); + entity.setEntityId(containerId.toString()); + entity.addPrimaryFilter(ContainerMetricsConstants.PARENT_PRIMARIY_FILTER, + containerId.getApplicationAttemptId().toString()); + return entity; + } + + private void putEntity(TimelineEntity entity) { + if (UserGroupInformation.isSecurityEnabled()) { + if (!hasReceivedTimelineDelegtionToken + && getTimelineDelegtionTokenAttempts < MAX_GET_TIMELINE_DELEGATION_TOKEN_ATTEMPTS) { 
+ try { + Token token = + client.getDelegationToken( + UserGroupInformation.getCurrentUser().getUserName()); + UserGroupInformation.getCurrentUser().addToken(token); + hasReceivedTimelineDelegtionToken = true; + } catch (Exception e) { + LOG.error("Error happens when getting timeline delegation token", e); + } finally { + ++getTimelineDelegtionTokenAttempts; + if (!hasReceivedTimelineDelegtionToken + && getTimelineDelegtionTokenAttempts == MAX_GET_TIMELINE_DELEGATION_TOKEN_ATTEMPTS) { + LOG.error("Run out of the attempts to get timeline delegation token. " + + "Use kerberos authentication only."); + } + } + } + } + try { + if (LOG.isDebugEnabled()) { + LOG.debug("Publishing the entity " + entity.getEntityId() + + ", JSON-style content: " + TimelineUtils.dumpTimelineRecordtoJSON(entity)); + } + client.putEntities(entity); + } catch (Exception e) { + LOG.error("Error when publishing entity [" + entity.getEntityType() + "," + + entity.getEntityId() + "]", e); + } + } + + /** + * EventHandler implementation which forward events to SystemMetricsPublisher. + * Making use of it, SystemMetricsPublisher can avoid to have a public handle + * method. 
+ */ + private final class ForwardingEventHandler implements + EventHandler { + + @Override + public void handle(SystemMetricsEvent event) { + handleSystemMetricsEvent(event); + } + + } + + @SuppressWarnings({ "rawtypes", "unchecked" }) + protected static class MultiThreadedDispatcher extends CompositeService + implements Dispatcher { + + private List dispatchers = + new ArrayList(); + + public MultiThreadedDispatcher(int num) { + super(MultiThreadedDispatcher.class.getName()); + for (int i = 0; i < num; ++i) { + AsyncDispatcher dispatcher = createDispatcher(); + dispatchers.add(dispatcher); + addIfService(dispatcher); + } + } + + @Override + public EventHandler getEventHandler() { + return new CompositEventHandler(); + } + + @Override + public void register(Class eventType, EventHandler handler) { + for (AsyncDispatcher dispatcher : dispatchers) { + dispatcher.register(eventType, handler); + } + } + + public void setDrainEventsOnStop() { + for (AsyncDispatcher dispatcher : dispatchers) { + dispatcher.setDrainEventsOnStop(); + } + } + + private class CompositEventHandler implements EventHandler { + + @Override + public void handle(Event event) { + // Use hashCode (of ApplicationId) to dispatch the event to the child + // dispatcher, such that all the writing events of one application will + // be handled by one thread, the scheduled order of the these events + // will be preserved + int index = (event.hashCode() & Integer.MAX_VALUE) % dispatchers.size(); + dispatchers.get(index).getEventHandler().handle(event); + } + + } + + protected AsyncDispatcher createDispatcher() { + return new AsyncDispatcher(); + } + + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java index cea3d7c422f..0f48b0cb184 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java @@ -602,8 +602,7 @@ public int compare(RMContainer a, RMContainer b) { if (priorityComp != 0) { return priorityComp; } - return b.getContainerId().getId() - - a.getContainerId().getId(); + return b.getContainerId().compareTo(a.getContainerId()); } }); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java index 4c01a618c89..216659b5ec6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -39,6 +39,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.io.DataInputByteBuffer; +import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.security.Credentials; import 
org.apache.hadoop.security.token.delegation.DelegationKey; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; @@ -164,9 +165,9 @@ protected synchronized void storeVersion() throws Exception { } @Override - public synchronized int getAndIncrementEpoch() throws Exception { + public synchronized long getAndIncrementEpoch() throws Exception { Path epochNodePath = getNodePath(rootDirPath, EPOCH_NODE); - int currentEpoch = 0; + long currentEpoch = 0; if (fs.exists(epochNodePath)) { // load current epoch FileStatus status = fs.getFileStatus(epochNodePath); @@ -573,12 +574,16 @@ private void deleteFile(Path deletePath) throws Exception { } private byte[] readFile(Path inputPath, long len) throws Exception { - FSDataInputStream fsIn = fs.open(inputPath); - // state data will not be that "long" - byte[] data = new byte[(int)len]; - fsIn.readFully(data); - fsIn.close(); - return data; + FSDataInputStream fsIn = null; + try { + fsIn = fs.open(inputPath); + // state data will not be that "long" + byte[] data = new byte[(int) len]; + fsIn.readFully(data); + return data; + } finally { + IOUtils.cleanup(LOG, fsIn); + } } /* @@ -592,10 +597,15 @@ private void writeFile(Path outputPath, byte[] data) throws Exception { FSDataOutputStream fsOut = null; // This file will be overwritten when app/attempt finishes for saving the // final status. 
- fsOut = fs.create(tempPath, true); - fsOut.write(data); - fsOut.close(); - fs.rename(tempPath, outputPath); + try { + fsOut = fs.create(tempPath, true); + fsOut.write(data); + fsOut.close(); + fsOut = null; + fs.rename(tempPath, outputPath); + } finally { + IOUtils.cleanup(LOG, fsOut); + } } /* diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java index efaa039b946..f208c74923e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/MemoryRMStateStore.java @@ -44,7 +44,7 @@ public class MemoryRMStateStore extends RMStateStore { RMState state = new RMState(); - private int epoch = 0; + private long epoch = 0L; @VisibleForTesting public RMState getState() { @@ -56,8 +56,8 @@ public void checkVersion() throws Exception { } @Override - public synchronized int getAndIncrementEpoch() throws Exception { - int currentEpoch = epoch; + public synchronized long getAndIncrementEpoch() throws Exception { + long currentEpoch = epoch; epoch = epoch + 1; return currentEpoch; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java index e910c19629e..b957d12f564 
100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java @@ -49,8 +49,8 @@ protected void closeInternal() throws Exception { } @Override - public synchronized int getAndIncrementEpoch() throws Exception { - return 0; + public synchronized long getAndIncrementEpoch() throws Exception { + return 0L; } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java index 3074d337a50..ac51a1747c0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStore.java @@ -545,7 +545,7 @@ public void checkVersion() throws Exception { /** * Get the current epoch of RM and increment the value. 
*/ - public abstract int getAndIncrementEpoch() throws Exception; + public abstract long getAndIncrementEpoch() throws Exception; /** * Blocking API diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java index 25f38190878..66fe9883070 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKRMStateStore.java @@ -412,9 +412,9 @@ protected synchronized Version loadVersion() throws Exception { } @Override - public synchronized int getAndIncrementEpoch() throws Exception { + public synchronized long getAndIncrementEpoch() throws Exception { String epochNodePath = getNodePath(zkRootNodePath, EPOCH_NODE); - int currentEpoch = 0; + long currentEpoch = 0; if (existsWithRetries(epochNodePath, true) != null) { // load current epoch byte[] data = getDataWithRetries(epochNodePath, true); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java index 80ec48ce634..714595618e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/Epoch.java @@ -32,15 +32,15 @@ @Unstable public abstract class Epoch { - public static Epoch newInstance(int sequenceNumber) { + public static Epoch newInstance(long sequenceNumber) { Epoch epoch = Records.newRecord(Epoch.class); epoch.setEpoch(sequenceNumber); return epoch; } - public abstract int getEpoch(); + public abstract long getEpoch(); - public abstract void setEpoch(int sequenceNumber); + public abstract void setEpoch(long sequenceNumber); public abstract EpochProto getProto(); @@ -50,10 +50,7 @@ public String toString() { @Override public int hashCode() { - final int prime = 31; - int result = 1; - result = prime * result + getEpoch(); - return result; + return (int) (getEpoch() ^ (getEpoch() >>> 32)); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java index a6ddeadb49c..025a1e9aeed 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/EpochPBImpl.java @@ -53,13 +53,13 @@ private void maybeInitBuilder() { } @Override - public int getEpoch() { + public long getEpoch() { EpochProtoOrBuilder p = viaProto ? 
proto : builder; - return (int) (p.getEpoch() & 0xffffffff); + return p.getEpoch(); } @Override - public void setEpoch(int sequentialNumber) { + public void setEpoch(long sequentialNumber) { maybeInitBuilder(); builder.setEpoch(sequentialNumber); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java index bf374b434d3..a1ae3ca3c64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMApp.java @@ -171,6 +171,12 @@ ApplicationReport createAndGetApplicationReport(String clientUserName, */ String getTrackingUrl(); + /** + * The original tracking url for the application master. + * @return the original tracking url for the application master. + */ + String getOriginalTrackingUrl(); + /** * the diagnostics information for the application master. * @return the diagnostics information for the application master. 
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 5b6df00eb6b..2dd2040ca46 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -365,6 +365,7 @@ public RMAppImpl(ApplicationId applicationId, RMContext rmContext, this.stateMachine = stateMachineFactory.make(this); rmContext.getRMApplicationHistoryWriter().applicationStarted(this); + rmContext.getSystemMetricsPublisher().appCreated(this, startTime); } @Override @@ -627,6 +628,20 @@ public String getTrackingUrl() { } } + @Override + public String getOriginalTrackingUrl() { + this.readLock.lock(); + + try { + if (this.currentAttempt != null) { + return this.currentAttempt.getOriginalTrackingUrl(); + } + return null; + } finally { + this.readLock.unlock(); + } + } + @Override public StringBuilder getDiagnostics() { this.readLock.lock(); @@ -1096,6 +1111,8 @@ public void transition(RMAppImpl app, RMAppEvent event) { app.rmContext.getRMApplicationHistoryWriter() .applicationFinished(app, finalState); + app.rmContext.getSystemMetricsPublisher() + .appFinished(app, finalState, app.finishTime); }; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index 93db340e4a7..148969695f7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -1156,6 +1156,9 @@ public void transition(RMAppAttemptImpl appAttempt, appAttempt.rmContext.getRMApplicationHistoryWriter() .applicationAttemptFinished(appAttempt, finalAttemptState); + appAttempt.rmContext.getSystemMetricsPublisher() + .appAttemptFinished( + appAttempt, finalAttemptState, System.currentTimeMillis()); } } @@ -1269,6 +1272,8 @@ public void transition(RMAppAttemptImpl appAttempt, appAttempt.rmContext.getRMApplicationHistoryWriter() .applicationAttemptStarted(appAttempt); + appAttempt.rmContext.getSystemMetricsPublisher() + .appAttemptRegistered(appAttempt, System.currentTimeMillis()); } } @@ -1723,8 +1728,8 @@ public ApplicationAttemptReport createApplicationAttemptReport() { masterContainer == null ? 
null : masterContainer.getId(); attemptReport = ApplicationAttemptReport.newInstance(this .getAppAttemptId(), this.getHost(), this.getRpcPort(), this - .getTrackingUrl(), this.getDiagnostics(), YarnApplicationAttemptState - .valueOf(this.getState().toString()), amId); + .getTrackingUrl(), this.getOriginalTrackingUrl(), this.getDiagnostics(), + YarnApplicationAttemptState .valueOf(this.getState().toString()), amId); } finally { this.readLock.unlock(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java index e7bb98e7ddc..885e864124b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java @@ -192,6 +192,8 @@ public RMContainerImpl(Container container, this.writeLock = lock.writeLock(); rmContext.getRMApplicationHistoryWriter().containerStarted(this); + rmContext.getSystemMetricsPublisher().containerCreated( + this, this.creationTime); } @Override @@ -497,6 +499,8 @@ public void transition(RMContainerImpl container, RMContainerEvent event) { container.rmContext.getRMApplicationHistoryWriter().containerFinished( container); + container.rmContext.getSystemMetricsPublisher().containerFinished( + container, container.finishTime); } private static void updateAttemptMetrics(RMContainerImpl container) { diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java index e871f429e16..3ade7f73d31 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/AppSchedulingInfo.java @@ -26,7 +26,7 @@ import java.util.Map; import java.util.Set; import java.util.TreeSet; -import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; @@ -58,9 +58,8 @@ public class AppSchedulingInfo { Queue queue; final String user; // TODO making containerIdCounter long - private final AtomicInteger containerIdCounter; - private final int EPOCH_BIT_MASK = 0x3ff; - private final int EPOCH_BIT_SHIFT = 22; + private final AtomicLong containerIdCounter; + private final int EPOCH_BIT_SHIFT = 40; final Set priorities = new TreeSet( new org.apache.hadoop.yarn.server.resourcemanager.resource.Priority.Comparator()); @@ -77,15 +76,14 @@ public class AppSchedulingInfo { public AppSchedulingInfo(ApplicationAttemptId appAttemptId, String user, Queue queue, ActiveUsersManager activeUsersManager, - int epoch) { + long epoch) { this.applicationAttemptId = appAttemptId; this.applicationId = appAttemptId.getApplicationId(); this.queue = queue; this.queueName = queue.getQueueName(); this.user = user; this.activeUsersManager = activeUsersManager; - this.containerIdCounter = new AtomicInteger( - (epoch & 
EPOCH_BIT_MASK) << EPOCH_BIT_SHIFT); + this.containerIdCounter = new AtomicLong(epoch << EPOCH_BIT_SHIFT); } public ApplicationId getApplicationId() { @@ -117,7 +115,7 @@ private synchronized void clearRequests() { LOG.info("Application " + applicationId + " requests cleared"); } - public int getNewContainerId() { + public long getNewContainerId() { return this.containerIdCounter.incrementAndGet(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 7032e3c3787..5240789699a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -182,7 +182,7 @@ public Set getPendingRelease() { return this.pendingRelease; } - public int getNewContainerId() { + public long getNewContainerId() { return appSchedulingInfo.getNewContainerId(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index d720eb61734..e3f1eba2b9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -21,6 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; import static org.mockito.Matchers.isA; import static org.mockito.Mockito.mock; @@ -119,6 +120,8 @@ public ConcurrentMap getRMApps() { } }; ((RMContextImpl)context).setStateStore(mock(RMStateStore.class)); + ((RMContextImpl)context).setSystemMetricsPublisher( + mock(SystemMetricsPublisher.class)); return context; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 571c96f6007..db867a95316 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -104,6 +104,7 @@ import org.apache.hadoop.yarn.ipc.YarnRPC; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.recovery.NullRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; @@ -1081,6 +1082,8 @@ private void mockRMContext(YarnScheduler yarnScheduler, RMContext rmContext) .thenThrow(new IOException("queue does not exist")); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); + SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class); + when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher); ConcurrentHashMap apps = getRMApps(rmContext, yarnScheduler); when(rmContext.getRMApps()).thenReturn(apps); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index d6af0d7307e..02983c25682 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -238,7 +238,7 @@ public void testSchedulerRecovery() throws Exception { } // *********** check appSchedulingInfo state *********** - assertEquals((1 << 22) + 1, schedulerAttempt.getNewContainerId()); + assertEquals((1L << 40) + 1L, schedulerAttempt.getNewContainerId()); } private void checkCSQueue(MockRM rm, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index 15e45c4c36e..7d928279779 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -117,6 +117,10 @@ public String getTrackingUrl() { throw new UnsupportedOperationException("Not supported yet."); } @Override + public String getOriginalTrackingUrl() { + throw new UnsupportedOperationException("Not supported yet."); + } + @Override public int getMaxAppAttempts() { throw new UnsupportedOperationException("Not supported yet."); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java new file mode 100644 index 00000000000..ec20af31e1d --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java @@ -0,0 +1,355 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.metrics; + +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +import java.util.EnumSet; + +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerState; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.NodeId; +import org.apache.hadoop.yarn.api.records.Priority; +import org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.YarnApplicationAttemptState; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEntity; +import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.server.applicationhistoryservice.ApplicationHistoryServer; +import org.apache.hadoop.yarn.server.applicationhistoryservice.webapp.AHSWebApp; +import org.apache.hadoop.yarn.server.metrics.AppAttemptMetricsConstants; +import org.apache.hadoop.yarn.server.metrics.ApplicationMetricsConstants; +import org.apache.hadoop.yarn.server.metrics.ContainerMetricsConstants; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; +import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; +import org.apache.hadoop.yarn.server.timeline.MemoryTimelineStore; +import org.apache.hadoop.yarn.server.timeline.TimelineReader.Field; +import org.apache.hadoop.yarn.server.timeline.TimelineStore; +import org.junit.AfterClass; +import org.junit.Assert; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestSystemMetricsPublisher { + + private static ApplicationHistoryServer timelineServer; + private static SystemMetricsPublisher metricsPublisher; + private static TimelineStore store; + + @BeforeClass + public static void setup() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.setBoolean(YarnConfiguration.TIMELINE_SERVICE_ENABLED, true); + conf.setBoolean(YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_ENABLED, true); + conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE, + MemoryTimelineStore.class, TimelineStore.class); + conf.setInt( + YarnConfiguration.RM_SYSTEM_METRICS_PUBLISHER_DISPATCHER_POOL_SIZE, + 2); + + timelineServer = new ApplicationHistoryServer(); + timelineServer.init(conf); + timelineServer.start(); + store = timelineServer.getTimelineStore(); + + metricsPublisher = new SystemMetricsPublisher(); + metricsPublisher.init(conf); + metricsPublisher.start(); + } + + @AfterClass + public static void tearDown() throws Exception { + if (metricsPublisher != null) { + metricsPublisher.stop(); + } + if (timelineServer != null) { + timelineServer.stop(); + } + AHSWebApp.resetInstance(); + } + + @Test(timeout = 10000) + public void testPublishApplicationMetrics() throws Exception { + ApplicationId appId = ApplicationId.newInstance(0, 1); + RMApp app = createRMApp(appId); + metricsPublisher.appCreated(app, app.getStartTime()); + metricsPublisher.appFinished(app, 
RMAppState.FINISHED, app.getFinishTime()); + TimelineEntity entity = null; + do { + entity = + store.getEntity(appId.toString(), + ApplicationMetricsConstants.ENTITY_TYPE, + EnumSet.allOf(Field.class)); + // ensure two events are both published before leaving the loop + } while (entity == null || entity.getEvents().size() < 2); + // verify all the fields + Assert.assertEquals(ApplicationMetricsConstants.ENTITY_TYPE, + entity.getEntityType()); + Assert + .assertEquals(app.getApplicationId().toString(), entity.getEntityId()); + Assert + .assertEquals( + app.getName(), + entity.getOtherInfo().get( + ApplicationMetricsConstants.NAME_ENTITY_INFO)); + Assert.assertEquals(app.getQueue(), + entity.getOtherInfo() + .get(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)); + Assert + .assertEquals( + app.getUser(), + entity.getOtherInfo().get( + ApplicationMetricsConstants.USER_ENTITY_INFO)); + Assert + .assertEquals( + app.getApplicationType(), + entity.getOtherInfo().get( + ApplicationMetricsConstants.TYPE_ENTITY_INFO)); + Assert.assertEquals(app.getSubmitTime(), + entity.getOtherInfo().get( + ApplicationMetricsConstants.SUBMITTED_TIME_ENTITY_INFO)); + boolean hasCreatedEvent = false; + boolean hasFinishedEvent = false; + for (TimelineEvent event : entity.getEvents()) { + if (event.getEventType().equals( + ApplicationMetricsConstants.CREATED_EVENT_TYPE)) { + hasCreatedEvent = true; + Assert.assertEquals(app.getStartTime(), event.getTimestamp()); + } else if (event.getEventType().equals( + ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) { + hasFinishedEvent = true; + Assert.assertEquals(app.getFinishTime(), event.getTimestamp()); + Assert.assertEquals( + app.getDiagnostics().toString(), + event.getEventInfo().get( + ApplicationMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)); + Assert.assertEquals( + app.getFinalApplicationStatus().toString(), + event.getEventInfo().get( + ApplicationMetricsConstants.FINAL_STATUS_EVENT_INFO)); + 
Assert.assertEquals(YarnApplicationState.FINISHED.toString(), event + .getEventInfo().get(ApplicationMetricsConstants.STATE_EVENT_INFO)); + } + } + Assert.assertTrue(hasCreatedEvent && hasFinishedEvent); + } + + @Test(timeout = 10000) + public void testPublishAppAttemptMetrics() throws Exception { + ApplicationAttemptId appAttemptId = + ApplicationAttemptId.newInstance(ApplicationId.newInstance(0, 1), 1); + RMAppAttempt appAttempt = createRMAppAttempt(appAttemptId); + metricsPublisher.appAttemptRegistered(appAttempt, Integer.MAX_VALUE + 1L); + metricsPublisher.appAttemptFinished(appAttempt, RMAppAttemptState.FINISHED, + Integer.MAX_VALUE + 2L); + TimelineEntity entity = null; + do { + entity = + store.getEntity(appAttemptId.toString(), + AppAttemptMetricsConstants.ENTITY_TYPE, + EnumSet.allOf(Field.class)); + // ensure two events are both published before leaving the loop + } while (entity == null || entity.getEvents().size() < 2); + // verify all the fields + Assert.assertEquals(AppAttemptMetricsConstants.ENTITY_TYPE, + entity.getEntityType()); + Assert.assertEquals(appAttemptId.toString(), entity.getEntityId()); + Assert.assertEquals( + appAttemptId.getApplicationId().toString(), + entity.getPrimaryFilters() + .get(AppAttemptMetricsConstants.PARENT_PRIMARY_FILTER).iterator() + .next()); + boolean hasRegisteredEvent = false; + boolean hasFinishedEvent = false; + for (TimelineEvent event : entity.getEvents()) { + if (event.getEventType().equals( + AppAttemptMetricsConstants.REGISTERED_EVENT_TYPE)) { + hasRegisteredEvent = true; + Assert.assertEquals(appAttempt.getHost(), + event.getEventInfo() + .get(AppAttemptMetricsConstants.HOST_EVENT_INFO)); + Assert + .assertEquals(appAttempt.getRpcPort(), + event.getEventInfo().get( + AppAttemptMetricsConstants.RPC_PORT_EVENT_INFO)); + Assert.assertEquals( + appAttempt.getMasterContainer().getId().toString(), + event.getEventInfo().get( + AppAttemptMetricsConstants.MASTER_CONTAINER_EVENT_INFO)); + } else if 
(event.getEventType().equals( + AppAttemptMetricsConstants.FINISHED_EVENT_TYPE)) { + hasFinishedEvent = true; + Assert.assertEquals(appAttempt.getDiagnostics(), event.getEventInfo() + .get(AppAttemptMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)); + Assert.assertEquals(appAttempt.getTrackingUrl(), event.getEventInfo() + .get(AppAttemptMetricsConstants.TRACKING_URL_EVENT_INFO)); + Assert.assertEquals( + appAttempt.getOriginalTrackingUrl(), + event.getEventInfo().get( + AppAttemptMetricsConstants.ORIGINAL_TRACKING_URL_EVENT_INFO)); + Assert.assertEquals( + appAttempt.getFinalApplicationStatus().toString(), + event.getEventInfo().get( + AppAttemptMetricsConstants.FINAL_STATUS_EVENT_INFO)); + Assert.assertEquals( + YarnApplicationAttemptState.FINISHED.toString(), + event.getEventInfo().get( + AppAttemptMetricsConstants.STATE_EVENT_INFO)); + } + } + Assert.assertTrue(hasRegisteredEvent && hasFinishedEvent); + } + + @Test(timeout = 10000) + public void testPublishContainerMetrics() throws Exception { + ContainerId containerId = + ContainerId.newInstance(ApplicationAttemptId.newInstance( + ApplicationId.newInstance(0, 1), 1), 1); + RMContainer container = createRMContainer(containerId); + metricsPublisher.containerCreated(container, container.getCreationTime()); + metricsPublisher.containerFinished(container, container.getFinishTime()); + TimelineEntity entity = null; + do { + entity = + store.getEntity(containerId.toString(), + ContainerMetricsConstants.ENTITY_TYPE, + EnumSet.allOf(Field.class)); + // ensure two events are both published before leaving the loop + } while (entity == null || entity.getEvents().size() < 2); + // verify all the fields + Assert.assertEquals(ContainerMetricsConstants.ENTITY_TYPE, + entity.getEntityType()); + Assert.assertEquals(containerId.toString(), entity.getEntityId()); + Assert.assertEquals( + containerId.getApplicationAttemptId().toString(), + entity.getPrimaryFilters() + .get(ContainerMetricsConstants.PARENT_PRIMARIY_FILTER).iterator() + 
.next()); + Assert.assertEquals( + container.getAllocatedNode().getHost(), + entity.getOtherInfo().get( + ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO)); + Assert.assertEquals( + container.getAllocatedNode().getPort(), + entity.getOtherInfo().get( + ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO)); + Assert.assertEquals( + container.getAllocatedResource().getMemory(), + entity.getOtherInfo().get( + ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO)); + Assert.assertEquals( + container.getAllocatedResource().getVirtualCores(), + entity.getOtherInfo().get( + ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO)); + Assert.assertEquals( + container.getAllocatedPriority().getPriority(), + entity.getOtherInfo().get( + ContainerMetricsConstants.ALLOCATED_PRIORITY_ENTITY_INFO)); + boolean hasCreatedEvent = false; + boolean hasFinishedEvent = false; + for (TimelineEvent event : entity.getEvents()) { + if (event.getEventType().equals( + ContainerMetricsConstants.CREATED_EVENT_TYPE)) { + hasCreatedEvent = true; + Assert.assertEquals(container.getCreationTime(), event.getTimestamp()); + } else if (event.getEventType().equals( + ContainerMetricsConstants.FINISHED_EVENT_TYPE)) { + hasFinishedEvent = true; + Assert.assertEquals(container.getFinishTime(), event.getTimestamp()); + Assert.assertEquals( + container.getDiagnosticsInfo(), + event.getEventInfo().get( + ContainerMetricsConstants.DIAGNOSTICS_INFO_EVENT_INFO)); + Assert.assertEquals( + container.getContainerExitStatus(), + event.getEventInfo().get( + ContainerMetricsConstants.EXIT_STATUS_EVENT_INFO)); + Assert.assertEquals(container.getContainerState().toString(), event + .getEventInfo().get(ContainerMetricsConstants.STATE_EVENT_INFO)); + } + } + Assert.assertTrue(hasCreatedEvent && hasFinishedEvent); + } + + private static RMApp createRMApp(ApplicationId appId) { + RMApp app = mock(RMApp.class); + when(app.getApplicationId()).thenReturn(appId); + when(app.getName()).thenReturn("test app"); + 
when(app.getApplicationType()).thenReturn("test app type"); + when(app.getUser()).thenReturn("test user"); + when(app.getQueue()).thenReturn("test queue"); + when(app.getSubmitTime()).thenReturn(Integer.MAX_VALUE + 1L); + when(app.getStartTime()).thenReturn(Integer.MAX_VALUE + 2L); + when(app.getFinishTime()).thenReturn(Integer.MAX_VALUE + 3L); + when(app.getDiagnostics()).thenReturn( + new StringBuilder("test diagnostics info")); + RMAppAttempt appAttempt = mock(RMAppAttempt.class); + when(appAttempt.getAppAttemptId()).thenReturn( + ApplicationAttemptId.newInstance(appId, 1)); + when(app.getCurrentAppAttempt()).thenReturn(appAttempt); + when(app.getFinalApplicationStatus()).thenReturn( + FinalApplicationStatus.UNDEFINED); + return app; + } + + private static RMAppAttempt createRMAppAttempt( + ApplicationAttemptId appAttemptId) { + RMAppAttempt appAttempt = mock(RMAppAttempt.class); + when(appAttempt.getAppAttemptId()).thenReturn(appAttemptId); + when(appAttempt.getHost()).thenReturn("test host"); + when(appAttempt.getRpcPort()).thenReturn(-100); + Container container = mock(Container.class); + when(container.getId()) + .thenReturn(ContainerId.newInstance(appAttemptId, 1)); + when(appAttempt.getMasterContainer()).thenReturn(container); + when(appAttempt.getDiagnostics()).thenReturn("test diagnostics info"); + when(appAttempt.getTrackingUrl()).thenReturn("test tracking url"); + when(appAttempt.getOriginalTrackingUrl()).thenReturn( + "test original tracking url"); + when(appAttempt.getFinalApplicationStatus()).thenReturn( + FinalApplicationStatus.UNDEFINED); + return appAttempt; + } + + private static RMContainer createRMContainer(ContainerId containerId) { + RMContainer container = mock(RMContainer.class); + when(container.getContainerId()).thenReturn(containerId); + when(container.getAllocatedNode()).thenReturn( + NodeId.newInstance("test host", -100)); + when(container.getAllocatedResource()).thenReturn( + Resource.newInstance(-1, -1)); + 
when(container.getAllocatedPriority()).thenReturn(Priority.UNDEFINED); + when(container.getCreationTime()).thenReturn(Integer.MAX_VALUE + 1L); + when(container.getFinishTime()).thenReturn(Integer.MAX_VALUE + 2L); + when(container.getDiagnosticsInfo()).thenReturn("test diagnostics info"); + when(container.getContainerExitStatus()).thenReturn(-1); + when(container.getContainerState()).thenReturn(ContainerState.COMPLETE); + return container; + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java index 2621dffcc74..a0ddc853909 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/RMStateStoreTestBase.java @@ -508,13 +508,13 @@ public void testEpoch(RMStateStoreHelper stateStoreHelper) RMStateStore store = stateStoreHelper.getRMStateStore(); store.setRMDispatcher(new TestDispatcher()); - int firstTimeEpoch = store.getAndIncrementEpoch(); + long firstTimeEpoch = store.getAndIncrementEpoch(); Assert.assertEquals(0, firstTimeEpoch); - int secondTimeEpoch = store.getAndIncrementEpoch(); + long secondTimeEpoch = store.getAndIncrementEpoch(); Assert.assertEquals(1, secondTimeEpoch); - int thirdTimeEpoch = store.getAndIncrementEpoch(); + long thirdTimeEpoch = store.getAndIncrementEpoch(); Assert.assertEquals(2, thirdTimeEpoch); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java index b63d2fe0e84..2fff71859e7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/MockRMApp.java @@ -50,6 +50,7 @@ public class MockRMApp implements RMApp { int failCount = 0; ApplicationId id; String url = null; + String oUrl = null; StringBuilder diagnostics = new StringBuilder(); RMAppAttempt attempt; int maxAppAttempts = 1; @@ -183,6 +184,15 @@ public void setTrackingUrl(String url) { this.url = url; } + @Override + public String getOriginalTrackingUrl() { + return oUrl; + } + + public void setOriginalTrackingUrl(String oUrl) { + this.oUrl = oUrl; + } + @Override public StringBuilder getDiagnostics() { return diagnostics; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index 5874b5d7162..08749688b64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.resourcemanager.rmapp; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.reset; @@ -56,6 +57,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.RMServerUtils; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; @@ -94,6 +96,7 @@ public class TestRMAppTransitions { private DrainDispatcher rmDispatcher; private RMStateStore store; private RMApplicationHistoryWriter writer; + private SystemMetricsPublisher publisher; private YarnScheduler scheduler; private TestSchedulerEventDispatcher schedulerDispatcher; @@ -203,6 +206,8 @@ null, new AMRMTokenSecretManager(conf, this.rmContext), new ClientToAMTokenSecretManagerInRM(), writer); ((RMContextImpl)realRMContext).setStateStore(store); + publisher = mock(SystemMetricsPublisher.class); + ((RMContextImpl)realRMContext).setSystemMetricsPublisher(publisher); this.rmContext = spy(realRMContext); @@ -354,6 +359,7 @@ protected RMApp testCreateAppNewSaving( ApplicationSubmissionContext submissionContext) throws IOException { RMApp application = createNewTestApp(submissionContext); verify(writer).applicationStarted(any(RMApp.class)); + verify(publisher).appCreated(any(RMApp.class), anyLong()); // NEW => 
NEW_SAVING event RMAppEventType.START RMAppEvent event = new RMAppEvent(application.getApplicationId(), RMAppEventType.START); @@ -477,6 +483,7 @@ public void testUnmanagedApp() throws IOException { // reset the counter of Mockito.verify reset(writer); + reset(publisher); // test app fails after 1 app attempt failure LOG.info("--- START: testUnmanagedAppFailPath ---"); @@ -961,6 +968,10 @@ private void verifyApplicationFinished(RMAppState state) { ArgumentCaptor.forClass(RMAppState.class); verify(writer).applicationFinished(any(RMApp.class), finalState.capture()); Assert.assertEquals(state, finalState.getValue()); + finalState = ArgumentCaptor.forClass(RMAppState.class); + verify(publisher).appFinished(any(RMApp.class), finalState.capture(), + anyLong()); + Assert.assertEquals(state, finalState.getValue()); } private void verifyAppRemovedSchedulerEvent(RMAppState finalState) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index ae318b54768..6608ccd08e8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -25,6 +25,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assume.assumeTrue; import static org.mockito.Matchers.any; +import static org.mockito.Matchers.anyLong; import static org.mockito.Matchers.eq; import static org.mockito.Mockito.mock; 
import static org.mockito.Mockito.spy; @@ -72,6 +73,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEventType; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher; +import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; @@ -134,6 +136,7 @@ public class TestRMAppAttemptTransitions { private AMLivelinessMonitor amLivelinessMonitor; private AMLivelinessMonitor amFinishingMonitor; private RMApplicationHistoryWriter writer; + private SystemMetricsPublisher publisher; private RMStateStore store; @@ -246,6 +249,8 @@ public void setUp() throws Exception { store = mock(RMStateStore.class); ((RMContextImpl) rmContext).setStateStore(store); + publisher = mock(SystemMetricsPublisher.class); + ((RMContextImpl) rmContext).setSystemMetricsPublisher(publisher); scheduler = mock(YarnScheduler.class); masterService = mock(ApplicationMasterService.class); @@ -1377,6 +1382,11 @@ private void verifyApplicationAttemptFinished(RMAppAttemptState state) { verify(writer).applicationAttemptFinished( any(RMAppAttempt.class), finalState.capture()); Assert.assertEquals(state, finalState.getValue()); + finalState = + ArgumentCaptor.forClass(RMAppAttemptState.class); + verify(publisher).appAttemptFinished(any(RMAppAttempt.class), finalState.capture(), + anyLong()); + Assert.assertEquals(state, finalState.getValue()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java index 9862cfebf2e..553587ed174 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/TestRMContainerImpl.java @@ -20,6 +20,7 @@ import static org.junit.Assert.assertEquals; import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyLong; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; @@ -51,6 +52,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; @@ -98,11 +100,13 @@ public void testReleaseWhileRunning() { Mockito.doReturn(rmApp).when(rmApps).get((ApplicationId)Matchers.any()); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); + SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class); RMContext rmContext = mock(RMContext.class); when(rmContext.getDispatcher()).thenReturn(drainDispatcher); when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); when(rmContext.getRMApps()).thenReturn(rmApps); 
+ when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher); RMContainer rmContainer = new RMContainerImpl(container, appAttemptId, nodeId, "user", rmContext); @@ -111,6 +115,7 @@ public void testReleaseWhileRunning() { assertEquals(nodeId, rmContainer.getAllocatedNode()); assertEquals(priority, rmContainer.getAllocatedPriority()); verify(writer).containerStarted(any(RMContainer.class)); + verify(publisher).containerCreated(any(RMContainer.class), anyLong()); rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.START)); @@ -143,6 +148,7 @@ public void testReleaseWhileRunning() { rmContainer.getContainerExitStatus()); assertEquals(ContainerState.COMPLETE, rmContainer.getContainerState()); verify(writer).containerFinished(any(RMContainer.class)); + verify(publisher).containerFinished(any(RMContainer.class), anyLong()); ArgumentCaptor captor = ArgumentCaptor .forClass(RMAppAttemptContainerFinishedEvent.class); @@ -184,10 +190,12 @@ public void testExpireWhileRunning() { "host:3465", resource, priority, null); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); + SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class); RMContext rmContext = mock(RMContext.class); when(rmContext.getDispatcher()).thenReturn(drainDispatcher); when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); + when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher); RMContainer rmContainer = new RMContainerImpl(container, appAttemptId, nodeId, "user", rmContext); @@ -196,6 +204,7 @@ public void testExpireWhileRunning() { assertEquals(nodeId, rmContainer.getAllocatedNode()); assertEquals(priority, rmContainer.getAllocatedPriority()); verify(writer).containerStarted(any(RMContainer.class)); + verify(publisher).containerCreated(any(RMContainer.class), anyLong()); rmContainer.handle(new RMContainerEvent(containerId, RMContainerEventType.START)); @@ 
-224,6 +233,8 @@ public void testExpireWhileRunning() { drainDispatcher.await(); assertEquals(RMContainerState.RUNNING, rmContainer.getState()); verify(writer, never()).containerFinished(any(RMContainer.class)); + verify(publisher, never()).containerFinished(any(RMContainer.class), + anyLong()); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java index 20a4aa8b6ea..c168b955c1e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerApplicationAttempt.java @@ -63,13 +63,13 @@ public void testMove() { ApplicationAttemptId appAttId = createAppAttemptId(0, 0); RMContext rmContext = mock(RMContext.class); - when(rmContext.getEpoch()).thenReturn(3); + when(rmContext.getEpoch()).thenReturn(3L); SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId, user, oldQueue, oldQueue.getActiveUsersManager(), rmContext); oldMetrics.submitApp(user); // confirm that containerId is calculated based on epoch. 
- assertEquals(app.getNewContainerId(), 0x00c00001); + assertEquals(0x30000000001L, app.getNewContainerId()); // Resource request Resource requestedResource = Resource.newInstance(1536, 2); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java index fd14ef6e364..66ec0e6ee64 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java @@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; @@ -249,10 +250,12 @@ public void testSortedQueues() throws Exception { mock(ContainerAllocationExpirer.class); DrainDispatcher drainDispatcher = new DrainDispatcher(); RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); + SystemMetricsPublisher publisher = mock(SystemMetricsPublisher.class); RMContext rmContext = mock(RMContext.class); 
when(rmContext.getContainerAllocationExpirer()).thenReturn(expirer); when(rmContext.getDispatcher()).thenReturn(drainDispatcher); when(rmContext.getRMApplicationHistoryWriter()).thenReturn(writer); + when(rmContext.getSystemMetricsPublisher()).thenReturn(publisher); ApplicationAttemptId appAttemptId = BuilderUtils.newApplicationAttemptId( app_0.getApplicationId(), 1); ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index e5486617bb4..9cb902d2e25 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -42,6 +42,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; @@ -92,6 +93,7 @@ public EventHandler getEventHandler() { new RMContainerTokenSecretManager(conf), new NMTokenSecretManagerInRM(conf), new ClientToAMTokenSecretManagerInRM(), writer); + 
rmContext.setSystemMetricsPublisher(mock(SystemMetricsPublisher.class)); return rmContext; } @@ -170,7 +172,9 @@ public static ContainerId getMockContainerId(FiCaSchedulerApp application) { ContainerId containerId = mock(ContainerId.class); doReturn(application.getApplicationAttemptId()). when(containerId).getApplicationAttemptId(); - doReturn(application.getNewContainerId()).when(containerId).getId(); + long id = application.getNewContainerId(); + doReturn((int)id).when(containerId).getId(); + doReturn(id).when(containerId).getContainerId(); return containerId; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java index 20ff2c9cd25..34c33b4bd95 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestMaxRunningAppsEnforcer.java @@ -63,7 +63,7 @@ public void setup() throws Exception { maxAppsEnforcer = new MaxRunningAppsEnforcer(scheduler); appNum = 0; rmContext = mock(RMContext.class); - when(rmContext.getEpoch()).thenReturn(0); + when(rmContext.getEpoch()).thenReturn(0L); } private FSAppAttempt addApp(FSLeafQueue queue, String user) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java index 3d383647ba4..b4c4c1017dd 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java @@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.Task; import org.apache.hadoop.yarn.server.resourcemanager.ahs.RMApplicationHistoryWriter; +import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsPublisher; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; @@ -145,6 +146,8 @@ public void testAppAttemptMetrics() throws Exception { RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, null, null, null, writer); + ((RMContextImpl) rmContext).setSystemMetricsPublisher( + mock(SystemMetricsPublisher.class)); FifoScheduler scheduler = new FifoScheduler(); Configuration conf = new Configuration(); @@ -188,6 +191,8 @@ public void testNodeLocalAssignment() throws Exception { RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, containerTokenSecretManager, nmTokenSecretManager, null, writer); + ((RMContextImpl) rmContext).setSystemMetricsPublisher( + mock(SystemMetricsPublisher.class)); 
FifoScheduler scheduler = new FifoScheduler(); scheduler.setRMContext(rmContext); @@ -257,6 +262,8 @@ public void testUpdateResourceOnNode() throws Exception { RMApplicationHistoryWriter writer = mock(RMApplicationHistoryWriter.class); RMContext rmContext = new RMContextImpl(dispatcher, null, null, null, null, null, containerTokenSecretManager, nmTokenSecretManager, null, writer); + ((RMContextImpl) rmContext).setSystemMetricsPublisher( + mock(SystemMetricsPublisher.class)); FifoScheduler scheduler = new FifoScheduler(){ @SuppressWarnings("unused") diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm index 0fd5b242a13..c819f162afc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/ResourceManagerRest.apt.vm @@ -1494,11 +1494,11 @@ _01_000001 | amHostHttpAddress | string | The nodes http address of the application master | *---------------+--------------+--------------------------------+ | allocatedMB | int | The sum of memory in MB allocated to the application's running containers | -*---------------------------------------------------------------+ +*---------------+--------------+--------------------------------+ | allocatedVCores | int | The sum of virtual cores allocated to the application's running containers | -+---------------------------------------------------------------+ +*---------------+--------------+--------------------------------+ | runningContainers | int | The number of containers currently running for the application | -+---------------------------------------------------------------+ +*---------------+--------------+--------------------------------+ | memorySeconds | long | The amount of memory the application has allocated (megabyte-seconds) | 
*---------------+--------------+--------------------------------+ | vcoreSeconds | long | The amount of CPU resources the application has allocated (virtual core-seconds) |